Andrew Morton <akpm@linux-foundation.org>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andy Adamson <andros@citi.umich.edu>
+Antoine Tenart <antoine.tenart@free-electrons.com>
Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
Archit Taneja <archit@ti.com>
Arnaud Patard <arnaud.patard@rtp-net.org>
Ben Gardner <bgardner@wabtec.com>
Ben M Cahill <ben.m.cahill@intel.com>
Björn Steinbrink <B.Steinbrink@gmx.de>
+Boris Brezillon <boris.brezillon@free-electrons.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon.dev@gmail.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon@overkiz.com>
Brian Avery <b.avery@hp.com>
Brian King <brking@us.ibm.com>
Christoph Hellwig <hch@lst.de>
Linas Vepstas <linas@austin.ibm.com>
Mark Brown <broonie@sirena.org.uk>
Matthieu CASTET <castet.matthieu@free.fr>
+Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> <mchehab@infradead.org> <mchehab@redhat.com> <m.chehab@samsung.com> <mchehab@osg.samsung.com> <mchehab@s-opensource.com>
Mayuresh Janorkar <mayur@ti.com>
Michael Buesch <m@bues.ch>
Michel Dänzer <michel@tungstengraphics.com>
Sascha Hauer <s.hauer@pengutronix.de>
S.Çağlar Onur <caglar@pardus.org.tr>
Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
+Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com> <shuah.khan@hp.com> <shuahkh@osg.samsung.com> <shuah.kh@samsung.com>
Simon Kelley <simon@thekelleys.org.uk>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <shemminger@osdl.org>
N: Mauro Carvalho Chehab
E: m.chehab@samsung.org
+E: mchehab@osg.samsung.com
E: mchehab@infradead.org
D: Media subsystem (V4L/DVB) drivers and core
D: EDAC drivers and EDAC 3.0 core rework
What: /config/usb-gadget/gadget/functions/uvc.name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: UVC function directory
streaming_maxburst - 0..15 (ss only)
What: /config/usb-gadget/gadget/functions/uvc.name/control
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Control descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/class
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/class/ss
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Super speed control class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/class/fs
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Full speed control class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Terminal descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Output terminal descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output/default
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Default output terminal descriptors
All attributes read only:
What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Camera terminal descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera/default
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Default camera terminal descriptors
All attributes read only:
What: /config/usb-gadget/gadget/functions/uvc.name/control/processing
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Processing unit descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/processing/default
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Default processing unit descriptors
All attributes read only:
What: /config/usb-gadget/gadget/functions/uvc.name/control/header
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Control header descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/header/name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Specific control header descriptors
dwClockFrequency
bcdUVC
What: /config/usb-gadget/gadget/functions/uvc.name/streaming
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Streaming descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Streaming class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/ss
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Super speed streaming class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/hs
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: High speed streaming class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/fs
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Full speed streaming class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Color matching descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching/default
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Default color matching descriptors
All attributes read only:
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: MJPEG format descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Specific MJPEG format descriptors
All attributes read only,
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name/name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Specific MJPEG frame descriptors
dwFrameInterval - indicates how frame interval can be
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Uncompressed format descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Specific uncompressed format descriptors
bmaControls - this format's data for bmaControls in
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name/name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Specific uncompressed frame descriptors
dwFrameInterval - indicates how frame interval can be
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Streaming header descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header/name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Specific streaming header descriptors
All attributes read only:
-What /sys/bus/iio/devices/iio:deviceX/in_proximity_raw
+What /sys/bus/iio/devices/iio:deviceX/in_proximity_input
Date: March 2014
KernelVersion: 3.15
Contact: Matt Ranostay <mranostay@gmail.com>
!Edrivers/base/platform.c
!Edrivers/base/bus.c
</sect1>
- <sect1><title>Device Drivers DMA Management</title>
+ <sect1>
+ <title>Buffer Sharing and Synchronization</title>
+ <para>
+ The dma-buf subsystem provides the framework for sharing buffers
+ for hardware (DMA) access across multiple device drivers and
+ subsystems, and for synchronizing asynchronous hardware access.
+ </para>
+ <para>
+ This is used, for example, by drm "prime" multi-GPU support, but
+ is of course not limited to GPU use cases.
+ </para>
+ <para>
+ The three main components of this are: (1) dma-buf, representing
+ an sg_table and exposed to userspace as a file descriptor to allow
+ passing between devices, (2) fence, which provides a mechanism
+ to signal when one device has finished access, and (3) reservation,
+ which manages the shared or exclusive fence(s) associated with
+ the buffer.
+ </para>
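+ <para>
+ As a minimal, illustrative sketch (not taken from any particular
+ driver; my_dmabuf_ops and buf are assumed to be provided by the
+ exporting driver), an exporter wraps its backing storage in a
+ dma-buf and hands a file descriptor to userspace roughly like so:
+ </para>
+ <programlisting>
+DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+struct dma_buf *dmabuf;
+int fd;
+
+exp_info.ops   = &amp;my_dmabuf_ops;  /* driver's struct dma_buf_ops */
+exp_info.size  = buf->size;           /* size of the backing storage */
+exp_info.flags = O_RDWR;
+exp_info.priv  = buf;                 /* driver private data */
+
+dmabuf = dma_buf_export(&amp;exp_info);
+fd = dma_buf_fd(dmabuf, O_CLOEXEC);   /* fd can be passed to other devices */
+ </programlisting>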
+ <sect2><title>dma-buf</title>
!Edrivers/dma-buf/dma-buf.c
+!Iinclude/linux/dma-buf.h
+ </sect2>
+ <sect2><title>reservation</title>
+!Pdrivers/dma-buf/reservation.c Reservation Object Overview
+!Edrivers/dma-buf/reservation.c
+!Iinclude/linux/reservation.h
+ </sect2>
+ <sect2><title>fence</title>
!Edrivers/dma-buf/fence.c
-!Edrivers/dma-buf/seqno-fence.c
!Iinclude/linux/fence.h
+!Edrivers/dma-buf/seqno-fence.c
!Iinclude/linux/seqno-fence.h
-!Edrivers/dma-buf/reservation.c
-!Iinclude/linux/reservation.h
!Edrivers/dma-buf/sync_file.c
!Iinclude/linux/sync_file.h
+ </sect2>
+ </sect1>
+ <sect1><title>Device Drivers DMA Management</title>
!Edrivers/base/dma-coherent.c
!Edrivers/base/dma-mapping.c
</sect1>
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
+| Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 |
| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
| Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
| Cavium | ThunderX SMMUv2 | #27704 | N/A |
display-timings are used instead.
Optional properties (required if display-timings are used):
+ - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
- display-timings : A node that describes the display timings as defined in
Documentation/devicetree/bindings/display/display-timing.txt.
- fsl,data-mapping : should be "spwg" or "jeida"
- "ti,ina220" for ina220
- "ti,ina226" for ina226
- "ti,ina230" for ina230
+ - "ti,ina231" for ina231
- reg: I2C address
Optional properties:
- our-claim-gpio: The GPIO that we use to claim the bus.
- their-claim-gpios: The GPIOs that the other sides use to claim the bus.
Note that some implementations may only support a single other master.
-- Standard I2C mux properties. See mux.txt in this directory.
-- Single I2C child bus node at reg 0. See mux.txt in this directory.
+- Standard I2C mux properties. See i2c-mux.txt in this directory.
+- Single I2C child bus node at reg 0. See i2c-mux.txt in this directory.
Optional properties:
- slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us.
- i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C
parents.
-Furthermore, I2C mux properties and child nodes. See mux.txt in this directory.
+Furthermore, I2C mux properties and child nodes. See i2c-mux.txt in this
+directory.
Example:
- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
port is connected to.
- mux-gpios: list of gpios used to control the muxer
-* Standard I2C mux properties. See mux.txt in this directory.
-* I2C child bus nodes. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
Optional properties:
- idle-state: value to set the muxer to when idle. When no value is
be numbered based on their order in the device tree.
Whenever an access is made to a device on a child bus, the value set
-in the revelant node's reg property will be output using the list of
+in the relevant node's reg property will be output using the list of
GPIOs, the first in the list holding the least-significant value.
If an idle state is defined, using the idle-state (optional) property,
* Standard pinctrl properties that specify the pin mux state for each child
bus. See ../pinctrl/pinctrl-bindings.txt.
-* Standard I2C mux properties. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
-* I2C child bus nodes. See mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
For each named state defined in the pinctrl-names property, an I2C child bus
will be created. I2C child bus numbers are assigned based on the index into
- compatible: i2c-mux-reg
- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
port is connected to.
-* Standard I2C mux properties. See mux.txt in this directory.
-* I2C child bus nodes. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
Optional properties:
- reg: this pair of <offset size> specifies the register to control the mux.
given, it defaults to the last value used.
Whenever an access is made to a device on a child bus, the value set
-in the revelant node's reg property will be output to the register.
+in the relevant node's reg property will be output to the register.
If an idle state is defined, using the idle-state (optional) property,
whenever an access is not being made to a device on a child bus, the
initialization. This is an array of 28 values (u8).
- marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip.
- firmware will use the pin to wakeup host system.
+ firmware will use the pin to wake up the host system (u16).
- marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host
platform. The value will be configured to firmware. This
- is needed to work chip's sleep feature as expected.
+ is needed for the chip's sleep feature to work as expected (u16).
- interrupt-parent: phandle of the parent interrupt controller
- interrupts : interrupt pin number to the cpu. Driver will request an irq based
on this interrupt number. During system suspend, the irq will be
0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02
0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00
0x00 0x00 0xf0 0x00>;
- marvell,wakeup-pin = <0x0d>;
- marvell,wakeup-gap-ms = <0x64>;
+ marvell,wakeup-pin = /bits/ 16 <0x0d>;
+ marvell,wakeup-gap-ms = /bits/ 16 <0x64>;
};
};
- reg: The i2c address. Value depends on the state of ADDR0
and ADDR1, as wired in hardware.
+Optional properties:
+ - clock-names: If provided must be "mclk".
+ - clocks: phandle + clock-specifiers for the clock that provides
+ the audio master clock for the device.
+
Examples:
#include <dt-bindings/sound/adau17x1.h>
adau1361@38 {
compatible = "adi,adau1761";
reg = <0x38>;
+
+ clock-names = "mclk";
+ clocks = <&audio_clock>;
};
};
--- /dev/null
+Analog Devices ADAU7002 Stereo PDM-to-I2S/TDM Converter
+
+Required properties:
+
+ - compatible: Must be "adi,adau7002"
+
+Optional properties:
+
+ - IOVDD-supply: Phandle and specifier for the power supply providing the IOVDD
+ supply as covered in Documentation/devicetree/bindings/regulator/regulator.txt
+
+ If this property is not present it is assumed that the supply pin is
+ hardwired to an always-on supply.
+
+Example:
+ adau7002: pdm-to-i2s {
+ compatible = "adi,adau7002";
+ IOVDD-supply = <&supply>;
+ };
--- /dev/null
+BROADCOM Cygnus Audio I2S/TDM/SPDIF controller
+
+Required properties:
+ - compatible : "brcm,cygnus-audio"
+ - #address-cells: 32bit valued, 1 cell.
+ - #size-cells: 32bit valued, 0 cell.
+ - reg : Should contain audio registers location and length
+ - reg-names: names of the registers listed in "reg" property
+ Valid names are "aud" and "i2s_in". "aud" contains a
+ set of DMA, I2S_OUT and SPDIF registers. "i2s_in" contains
+ a set of I2S_IN registers.
+ - clocks: PLL and leaf clocks used by audio ports
+ - assigned-clocks: PLL and leaf clocks
+ - assigned-clock-parents: parent clocks of the assigned clocks
+ (usually the PLL)
+ - assigned-clock-rates: List of clock frequencies of the
+ assigned clocks
+ - clock-names: names of 3 leaf clocks used by audio ports
+ Valid names are "ch0_audio", "ch1_audio", "ch2_audio"
+ - interrupts: audio DMA interrupt number
+
+SSP Subnode properties:
+- reg: The index of ssp port interface to use
+ Valid values are 0, 1, 2, or 3 (for spdif)
+
+Example:
+ cygnus_audio: audio@180ae000 {
+ compatible = "brcm,cygnus-audio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x180ae000 0xafd>, <0x180aec00 0x1f8>;
+ reg-names = "aud", "i2s_in";
+ clocks = <&audiopll BCM_CYGNUS_AUDIOPLL_CH0>,
+ <&audiopll BCM_CYGNUS_AUDIOPLL_CH1>,
+ <&audiopll BCM_CYGNUS_AUDIOPLL_CH2>;
+ assigned-clocks = <&audiopll BCM_CYGNUS_AUDIOPLL>,
+ <&audiopll BCM_CYGNUS_AUDIOPLL_CH0>,
+ <&audiopll BCM_CYGNUS_AUDIOPLL_CH1>,
+ <&audiopll BCM_CYGNUS_AUDIOPLL_CH2>;
+ assigned-clock-parents = <&audiopll BCM_CYGNUS_AUDIOPLL>;
+ assigned-clock-rates = <1769470191>,
+ <0>,
+ <0>,
+ <0>;
+ clock-names = "ch0_audio", "ch1_audio", "ch2_audio";
+ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+
+ ssp0: ssp_port@0 {
+ reg = <0>;
+ status = "okay";
+ };
+
+ ssp1: ssp_port@1 {
+ reg = <1>;
+ status = "disabled";
+ };
+
+ ssp2: ssp_port@2 {
+ reg = <2>;
+ status = "disabled";
+ };
+
+ spdif: spdif_port@3 {
+ reg = <3>;
+ status = "disabled";
+ };
+ };
Required properties:
- - compatible : "delta,dfbmcs320"
+ - compatible : "delta,dfbmcs320" or "linux,bt-sco"
Example:
--- /dev/null
+CS35L33 Speaker Amplifier
+
+Required properties:
+
+ - compatible : "cirrus,cs35l33"
+
+ - reg : the I2C address of the device for I2C
+
+ - VA-supply, VP-supply : power supplies for the device,
+ as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Optional properties:
+
+ - reset-gpios : gpio used to reset the amplifier
+
+ - interrupt-parent : Specifies the phandle of the interrupt controller to
+ which the IRQs from the CS35L33 are delivered.
+ - interrupts : IRQ line info for the CS35L33.
+ (See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+ for further information relating to interrupt properties)
+
+ - cirrus,boost-ctl : Booster voltage used to supply the amp. If the value is
+ 0, then VBST = VP. If greater than 0, the boost voltage will be 3300mV for
+ a value of 1 and will increase in steps of 100mV up to a maximum of
+ 8000mV.
+
+ - cirrus,ramp-rate : On power up, it affects the time from when the power
+ up sequence begins to the time the audio reaches a full-scale output.
+ On power down, it affects the time from when the power-down sequence
+ begins to when the amplifier disables the PWM outputs. If this property
+ is not set then soft ramping will be disabled and the ramp time will be
+ 20ms. If this property is set to 0, 1, 2 or 3 then the ramp times will be
+ 40ms, 60ms, 100ms or 175ms respectively for a 48kHz sample rate.
+
+ - cirrus,boost-ipk : The maximum current allowed for the boost converter.
+ The range starts at 1850000uA and goes to a maximum of 3600000uA
+ with a step size of 15625uA. The default is 2500000uA.
+
+ - cirrus,imon-adc-scale : Configures the scaling of data bits from the IMON
+ ADC data word. This property can be set to 0 for bits 15 down to 0, 6 for
+ bits 21 down to 6, 7 for bits 22 down to 7, or 8 for bits 23 down to 8.
+
+
+Optional H/G Algorithm sub-node:
+
+The cs35l33 node can have a single "cirrus,hg-algo" sub-node that will enable
+the internal H/G Algorithm.
+
+ - cirrus,hg-algo : Sub-node for internal Class H/G algorithm that
+ controls the amplifier supplies.
+
+Optional properties for the "cirrus,hg-algo" sub-node:
+
+ - cirrus,mem-depth : Memory depth for the Class H/G algorithm measured in
+ LRCLK cycles. If this property is set to 0, 1, 2, or 3 then the memory
+ depths will be 1, 4, 8, 16 LRCLK cycles. The default is 16 LRCLK cycles.
+
+ - cirrus,release-rate : The number of consecutive LRCLK periods before
+ allowing release condition tracking updates. The number of LRCLK periods
+ ranges from 3 to a maximum of 255.
+
+ - cirrus,ldo-thld : Configures the signal threshold at which the PWM output
+ stage enters LDO operation. Starts as a default value of 50mV for a value
+ of 1 and increases with a step size of 50mV to a maximum of 750mV (value of
+ 0xF).
+
+ - cirrus,ldo-path-disable : This is a boolean property. If present, the H/G
+ algorithm uses the max detection path. If not present, the LDO
+ detection path is used.
+
+ - cirrus,ldo-entry-delay : The LDO entry delay in milliseconds before the H/G
+ algorithm switches to the LDO voltage. This property can be set to values
+ from 0 to 7 for delays of 5ms, 10ms, 50ms, 100ms, 200ms, 500ms, 1000ms.
+ The default is 100ms.
+
+ - cirrus,vp-hg-auto : This is a boolean property. When set, class H/G VPhg
+ automatic updating is enabled.
+
+ - cirrus,vp-hg : Class H/G algorithm VPhg. Controls the H/G algorithm's
+ reference to the VP voltage for when to start generating a boosted VBST.
+ The reference voltage starts at 3000mV with a value of 0x3 and is increased
+ by 100mV per step to a maximum of 5500mV.
+
+ - cirrus,vp-hg-rate : The rate (number of LRCLK periods) at which the VPhg is
+ allowed to increase to a higher voltage when using VPhg automatic
+ tracking. This property can be set to values from 0 to 3 with rates of 128
+ periods, 2048 periods, 32768 periods, and 524288 periods.
+ The default is 32768 periods.
+
+ - cirrus,vp-hg-va : VA calculation reference for automatic VPhg tracking
+ using VPMON. This property can be set to values from 0 to 6 starting at
+ 1800mV with a step size of 50mV up to a maximum value of 1750mV.
+ Default is 1800mV.
+
+Example:
+
+cs35l33: cs35l33@40 {
+ compatible = "cirrus,cs35l33";
+ reg = <0x40>;
+
+ VA-supply = <&ldo5_reg>;
+ VP-supply = <&ldo5_reg>;
+
+ interrupt-parent = <&gpio8>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&cs47l91 34 0>;
+
+ cirrus,ramp-rate = <0x0>;
+ cirrus,boost-ctl = <0x30>; /* VBST = 8000mV */
+ cirrus,boost-ipk = <0xE0>; /* 3600mA */
+ cirrus,imon-adc-scale = <0>; /* Bits 15 down to 0 */
+
+ cirrus,hg-algo {
+ cirrus,mem-depth = <0x3>;
+ cirrus,release-rate = <0x3>;
+ cirrus,ldo-thld = <0x1>;
+ cirrus,ldo-path-disable = <0x0>;
+ cirrus,ldo-entry-delay = <0x4>;
+ cirrus,vp-hg-auto;
+ cirrus,vp-hg = <0xF>;
+ cirrus,vp-hg-rate = <0x2>;
+ cirrus,vp-hg-va = <0x0>;
+ };
+};
--- /dev/null
+CS53L30 audio CODEC
+
+Required properties:
+
+ - compatible : "cirrus,cs53l30"
+
+ - reg : the I2C address of the device
+
+ - VA-supply, VP-supply : power supplies for the device,
+ as covered in Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Optional properties:
+
+ - reset-gpios : a GPIO spec for the reset pin.
+
+ - mute-gpios : a GPIO spec for the MUTE pin. The active state can be either
+ GPIO_ACTIVE_HIGH or GPIO_ACTIVE_LOW, which would be handled
+ by the driver automatically.
+
+ - cirrus,micbias-lvl : Set the output voltage level on the MICBIAS Pin.
+ 0 = Hi-Z
+ 1 = 1.80 V
+ 2 = 2.75 V
+
+ - cirrus,use-sdout2 : This is a boolean property. If present, it indicates
+ the hardware design connects both SDOUT1 and SDOUT2
+ pins to output data. Otherwise, it indicates that
+ only SDOUT1 is connected for data output.
+ * The CS53L30 supports 4-channel data output in the same
+ * frame in two different ways:
+ * 1) Normal I2S mode on two data pins -- each SDOUT
+ *    carries 2-channel data at the same time.
+ * 2) TDM mode on one single data pin -- SDOUT1 carries
+ *    4-channel data per frame.
+
+Example:
+
+codec: cs53l30@48 {
+ compatible = "cirrus,cs53l30";
+ reg = <0x48>;
+ reset-gpios = <&gpio 54 0>;
+ VA-supply = <&cs53l30_va>;
+ VP-supply = <&cs53l30_vp>;
+};
one for receive.
- dma-names : "tx" for the transmit channel, "rx" for the receive channel.
+Optional properties:
+ - interrupts: The interrupt line number for the I2S controller. Add this
+ parameter if the I2S controller that you are using does not support DMA.
+
For more details on the 'dma', 'dma-names', 'clock' and 'clock-names'
properties please check:
* resource-names.txt
* DMIC (stands for Digital Microphone Jack)
Note: The "Mic Jack" and "AMIC" are redundant while
- coexsiting in order to support the old bindings
+ coexisting in order to support the old bindings
of wm8962 and sgtl5000.
Optional properties:
--- /dev/null
+Maxim MAX98504 class D mono speaker amplifier
+
+This device supports an I2C control interface and an IRQ output signal. It features
+a PCM and PDM digital audio interface (DAI) and a differential analog input.
+
+Required properties:
+
+ - compatible : "maxim,max98504"
+ - reg : should contain the I2C slave device address
+ - DVDD-supply, DIOVDD-supply, PVDD-supply: power supplies for the device,
+ as covered in ../regulator/regulator.txt
+ - interrupts : should specify the interrupt line the device is connected to,
+ as described in ../interrupt-controller/interrupts.txt
+
+Optional properties:
+
+ - maxim,brownout-threshold - the PVDD brownout threshold; the value must be
+ in the 0, 1...21 range, corresponding to the 2.6V, 2.65V...3.65V voltage range
+ - maxim,brownout-attenuation - the brownout attenuation applied to the speaker
+ gain during the "attack hold" and "timed hold" phases; the value must be
+ in the 0...6 (dB) range
+ - maxim,brownout-attack-hold-ms - the brownout attack hold phase time in ms,
+ 0...255 (VBATBROWN_ATTK_HOLD, register 0x0018)
+ - maxim,brownout-timed-hold-ms - the brownout timed hold phase time in ms,
+ 0...255 (VBATBROWN_TIME_HOLD, register 0x0019)
+ - maxim,brownout-release-rate-ms - the brownout release phase step time in ms,
+ 0...255 (VBATBROWN_RELEASE, register 0x001A)
+
+The default value when the above properties are not specified is 0;
+the maxim,brownout-threshold property must be specified to actually enable
+the PVDD brownout protection.
+
+Example:
+
+ max98504@31 {
+ compatible = "maxim,max98504";
+ reg = <0x31>;
+ interrupt-parent = <&gpio_bank_0>;
+ interrupts = <2 0>;
+
+ DVDD-supply = <&regulator>;
+ DIOVDD-supply = <&regulator>;
+ PVDD-supply = <&regulator>;
+};
--- /dev/null
+MAX9860 Mono Audio Voice Codec
+
+Required properties:
+
+ - compatible : "maxim,max9860"
+
+ - reg : the I2C address of the device
+
+ - AVDD-supply, DVDD-supply and DVDDIO-supply : power supplies for
+ the device, as covered in bindings/regulator/regulator.txt
+
+ - clock-names : Required element: "mclk".
+
+ - clocks : A clock specifier for the clock connected as MCLK.
+
+Examples:
+
+ max9860: max9860@10 {
+ compatible = "maxim,max9860";
+ reg = <0x10>;
+
+ AVDD-supply = <&reg_1v8>;
+ DVDD-supply = <&reg_1v8>;
+ DVDDIO-supply = <&reg_3v0>;
+
+ clock-names = "mclk";
+ clocks = <&pck2>;
+ };
--- /dev/null
+Mediatek AFE PCM controller for mt2701
+
+Required properties:
+- compatible = "mediatek,mt2701-audio";
+- reg: register location and size
+- interrupts: Should contain AFE interrupt
+- clock-names: should have these clock names:
+ "infra_sys_audio_clk",
+ "top_audio_mux1_sel",
+ "top_audio_mux2_sel",
+ "top_audio_mux1_div",
+ "top_audio_mux2_div",
+ "top_audio_48k_timing",
+ "top_audio_44k_timing",
+ "top_audpll_mux_sel",
+ "top_apll_sel",
+ "top_aud1_pll_98M",
+ "top_aud2_pll_90M",
+ "top_hadds2_pll_98M",
+ "top_hadds2_pll_294M",
+ "top_audpll",
+ "top_audpll_d4",
+ "top_audpll_d8",
+ "top_audpll_d16",
+ "top_audpll_d24",
+ "top_audintbus_sel",
+ "clk_26m",
+ "top_syspll1_d4",
+ "top_aud_k1_src_sel",
+ "top_aud_k2_src_sel",
+ "top_aud_k3_src_sel",
+ "top_aud_k4_src_sel",
+ "top_aud_k5_src_sel",
+ "top_aud_k6_src_sel",
+ "top_aud_k1_src_div",
+ "top_aud_k2_src_div",
+ "top_aud_k3_src_div",
+ "top_aud_k4_src_div",
+ "top_aud_k5_src_div",
+ "top_aud_k6_src_div",
+ "top_aud_i2s1_mclk",
+ "top_aud_i2s2_mclk",
+ "top_aud_i2s3_mclk",
+ "top_aud_i2s4_mclk",
+ "top_aud_i2s5_mclk",
+ "top_aud_i2s6_mclk",
+ "top_asm_m_sel",
+ "top_asm_h_sel",
+ "top_univpll2_d4",
+ "top_univpll2_d2",
+ "top_syspll_d5";
+
+Example:
+
+ afe: mt2701-afe-pcm@11220000 {
+ compatible = "mediatek,mt2701-audio";
+ reg = <0 0x11220000 0 0x2000>,
+ <0 0x112A0000 0 0x20000>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infracfg CLK_INFRA_AUDIO>,
+ <&topckgen CLK_TOP_AUD_MUX1_SEL>,
+ <&topckgen CLK_TOP_AUD_MUX2_SEL>,
+ <&topckgen CLK_TOP_AUD_MUX1_DIV>,
+ <&topckgen CLK_TOP_AUD_MUX2_DIV>,
+ <&topckgen CLK_TOP_AUD_48K_TIMING>,
+ <&topckgen CLK_TOP_AUD_44K_TIMING>,
+ <&topckgen CLK_TOP_AUDPLL_MUX_SEL>,
+ <&topckgen CLK_TOP_APLL_SEL>,
+ <&topckgen CLK_TOP_AUD1PLL_98M>,
+ <&topckgen CLK_TOP_AUD2PLL_90M>,
+ <&topckgen CLK_TOP_HADDS2PLL_98M>,
+ <&topckgen CLK_TOP_HADDS2PLL_294M>,
+ <&topckgen CLK_TOP_AUDPLL>,
+ <&topckgen CLK_TOP_AUDPLL_D4>,
+ <&topckgen CLK_TOP_AUDPLL_D8>,
+ <&topckgen CLK_TOP_AUDPLL_D16>,
+ <&topckgen CLK_TOP_AUDPLL_D24>,
+ <&topckgen CLK_TOP_AUDINTBUS_SEL>,
+ <&clk26m>,
+ <&topckgen CLK_TOP_SYSPLL1_D4>,
+ <&topckgen CLK_TOP_AUD_K1_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K2_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K3_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K4_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K5_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K6_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K1_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K2_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K3_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K4_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K5_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K6_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_I2S1_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S2_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S3_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S4_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S5_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S6_MCLK>,
+ <&topckgen CLK_TOP_ASM_M_SEL>,
+ <&topckgen CLK_TOP_ASM_H_SEL>,
+ <&topckgen CLK_TOP_UNIVPLL2_D4>,
+ <&topckgen CLK_TOP_UNIVPLL2_D2>,
+ <&topckgen CLK_TOP_SYSPLL_D5>;
+
+ clock-names = "infra_sys_audio_clk",
+ "top_audio_mux1_sel",
+ "top_audio_mux2_sel",
+ "top_audio_mux1_div",
+ "top_audio_mux2_div",
+ "top_audio_48k_timing",
+ "top_audio_44k_timing",
+ "top_audpll_mux_sel",
+ "top_apll_sel",
+ "top_aud1_pll_98M",
+ "top_aud2_pll_90M",
+ "top_hadds2_pll_98M",
+ "top_hadds2_pll_294M",
+ "top_audpll",
+ "top_audpll_d4",
+ "top_audpll_d8",
+ "top_audpll_d16",
+ "top_audpll_d24",
+ "top_audintbus_sel",
+ "clk_26m",
+ "top_syspll1_d4",
+ "top_aud_k1_src_sel",
+ "top_aud_k2_src_sel",
+ "top_aud_k3_src_sel",
+ "top_aud_k4_src_sel",
+ "top_aud_k5_src_sel",
+ "top_aud_k6_src_sel",
+ "top_aud_k1_src_div",
+ "top_aud_k2_src_div",
+ "top_aud_k3_src_div",
+ "top_aud_k4_src_div",
+ "top_aud_k5_src_div",
+ "top_aud_k6_src_div",
+ "top_aud_i2s1_mclk",
+ "top_aud_i2s2_mclk",
+ "top_aud_i2s3_mclk",
+ "top_aud_i2s4_mclk",
+ "top_aud_i2s5_mclk",
+ "top_aud_i2s6_mclk",
+ "top_asm_m_sel",
+ "top_asm_h_sel",
+ "top_univpll2_d4",
+ "top_univpll2_d2",
+ "top_syspll_d5";
+ };
--- /dev/null
+MT2701 with CS42448 CODEC
+
+Required properties:
+- compatible: "mediatek,mt2701-cs42448-machine"
+- mediatek,platform: the phandle of MT2701 ASoC platform
+- audio-routing: a list of the connections between audio components
+- mediatek,audio-codec: the phandle of the cs42448 codec
+- mediatek,audio-codec-bt-mrg: the phandle of the bt-sco dummy codec
+- pinctrl-names: Should contain only one value - "default"
+- pinctrl-0: Should specify pin control groups used for this controller.
+- i2s1-in-sel-gpio1, i2s1-in-sel-gpio2: Should specify two gpio pins to
+ control I2S1-in mux.
+
+Example:
+
sound: sound {
+ compatible = "mediatek,mt2701-cs42448-machine";
+ mediatek,platform = <&afe>;
+ /* CS42448 Machine name */
+ audio-routing =
+ "Line Out Jack", "AOUT1L",
+ "Line Out Jack", "AOUT1R",
+ "Line Out Jack", "AOUT2L",
+ "Line Out Jack", "AOUT2R",
+ "Line Out Jack", "AOUT3L",
+ "Line Out Jack", "AOUT3R",
+ "Line Out Jack", "AOUT4L",
+ "Line Out Jack", "AOUT4R",
+ "AIN1L", "AMIC",
+ "AIN1R", "AMIC",
+ "AIN2L", "Tuner In",
+ "AIN2R", "Tuner In",
+ "AIN3L", "Satellite Tuner In",
+ "AIN3R", "Satellite Tuner In",
+ "AIN3L", "AUX In",
+ "AIN3R", "AUX In";
+ mediatek,audio-codec = <&cs42448>;
+ mediatek,audio-codec-bt-mrg = <&bt_sco_codec>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&aud_pins_default>;
+ i2s1-in-sel-gpio1 = <&pio 53 0>;
+ i2s1-in-sel-gpio2 = <&pio 54 0>;
+ };
-MT8173 with RT5650 CODECS
+MT8173 with RT5650 CODECS and HDMI via I2S
Required properties:
- compatible : "mediatek,mt8173-rt5650"
- mediatek,audio-codec: the phandles of rt5650 codecs
+ and of the hdmi encoder node
- mediatek,platform: the phandle of MT8173 ASoC platform
Optional subnodes:
<&rt5650 0> : Default setting. Connect rt5650 I2S1 for capture. (dai_name = rt5645-aif1)
<&rt5650 1> : Connect rt5650 I2S2 for capture. (dai_name = rt5645-aif2)
+- mediatek,mclk: the MCLK source
+ 0 : external oscillator, MCLK = 12.288MHz
+ 1 : internal source from mt8173, MCLK = sampling rate * 256
+
Example:
sound {
compatible = "mediatek,mt8173-rt5650";
- mediatek,audio-codec = <&rt5650>;
+ mediatek,audio-codec = <&rt5650 &hdmi0>;
mediatek,platform = <&afe>;
+ mediatek,mclk = <0>;
codec-capture {
sound-dai = <&rt5650 1>;
};
- interrupts: Interrupt number for McPDM
- interrupt-parent: The parent interrupt controller
- ti,hwmods: Name of the hwmod associated to the McPDM
+- clocks: phandle for the pdmclk provider, likely <&twl6040>
+- clock-names: Must be "pdmclk"
Example:
interrupt-parent = <&gic>;
ti,hwmods = "mcpdm";
};
+
+In the board DTS file, the pdmclk needs to be added:
+
+&mcpdm {
+ clocks = <&twl6040>;
+ clock-names = "pdmclk";
+ status = "okay";
+};
- #clock-cells : it must be 0 if your system has audio_clkout
it must be 1 if your system has audio_clkout0/1/2/3
- clock-frequency : for all audio_clkout0/1/2/3
+- clkout-lr-asynchronous : boolean property. It indicates that audio_clkoutn
+  is asynchronous to the LR clock.
SSI subnode properties:
- interrupts : Should contain SSI interrupt for PIO transfer
- rockchip,playback-channels: max playback channels, if not set, 8 channels default.
- rockchip,capture-channels: max capture channels, if not set, 2 channels default.
+Required properties for controllers which support multi-channel
+playback/capture:
+
+- rockchip,grf: the phandle of the syscon node for GRF register.
+
Example for rk3288 I2S controller:
i2s@ff890000 {
- reg : The I2C address of the device.
+Optional properties:
+
+- clocks: The phandle of the master clock to the CODEC
+- clock-names: Should be "mclk"
+
Pins on the device (for linking into audio routes) for RT5514:
* DMIC1L
+++ /dev/null
-Samsung Exynos Odroid X2/U3 audio complex with MAX98090 codec
-
-Required properties:
- - compatible : "samsung,odroidx2-audio" - for Odroid X2 board,
- "samsung,odroidu3-audio" - for Odroid U3 board
- - samsung,model : the user-visible name of this sound complex
- - samsung,i2s-controller : the phandle of the I2S controller
- - samsung,audio-codec : the phandle of the MAX98090 audio codec
- - samsung,audio-routing : a list of the connections between audio
- components; each entry is a pair of strings, the first being the
- connection's sink, the second being the connection's source;
- valid names for sources and sinks are the MAX98090's pins (as
- documented in its binding), and the jacks on the board
- For Odroid X2:
- * Headphone Jack
- * Mic Jack
- * DMIC
-
- For Odroid U3:
- * Headphone Jack
- * Speakers
-
-Example:
-
-sound {
- compatible = "samsung,odroidu3-audio";
- samsung,i2s-controller = <&i2s0>;
- samsung,audio-codec = <&max98090>;
- samsung,model = "Odroid-X2";
- samsung,audio-routing =
- "Headphone Jack", "HPL",
- "Headphone Jack", "HPR",
- "IN1", "Mic Jack",
- "Mic Jack", "MICBIAS";
-};
"tx" for "st,sti-uni-player" compatibility
"rx" for "st,sti-uni-reader" compatibility
- - version: IP version integrated in SOC.
+ - st,version: IP version integrated in SOC.
- dai-name: DAI name that describes the IP.
- - IP mode: IP working mode depending on associated codec.
+ - st,mode: IP working mode depending on associated codec.
"HDMI" connected to HDMI codec and support IEC HDMI formats (player only).
"SPDIF" connected to SPDIF codec and support SPDIF formats (player only).
"PCM" PCM standard mode for I2S or TDM bus.
- clocks: CPU_DAI IP clock source, listed in the same order than the
CPU_DAI properties.
- - uniperiph-id: internal SOC IP instance ID.
+ - st,uniperiph-id: internal SOC IP instance ID.
Optional properties:
- pinctrl-0: defined for CPU_DAI@1 and CPU_DAI@4 to describe I2S PIOs for
dmas = <&fdma0 4 0 1>;
dai-name = "Uni Player #2 (DAC)";
dma-names = "tx";
- uniperiph-id = <2>;
- version = <5>;
- mode = "PCM";
+ st,uniperiph-id = <2>;
+ st,version = <5>;
+ st,mode = "PCM";
};
sti_uni_player3: sti-uni-player@3 {
dmas = <&fdma0 7 0 1>;
dma-names = "tx";
dai-name = "Uni Player #3 (SPDIF)";
- uniperiph-id = <3>;
- version = <5>;
- mode = "SPDIF";
+ st,uniperiph-id = <3>;
+ st,version = <5>;
+ st,mode = "SPDIF";
};
sti_uni_reader1: sti-uni-reader@1 {
dmas = <&fdma0 6 0 1>;
dma-names = "rx";
dai-name = "Uni Reader #1 (HDMI RX)";
- version = <3>;
+ st,version = <3>;
st,mode = "PCM";
};
--- /dev/null
+* Allwinner A10 I2S controller
+
+The I2S bus (Inter-IC sound bus) is a serial link for digital
+audio data transfer between devices in the system.
+
+Required properties:
+
+- compatible: should be one of the following:
+ - "allwinner,sun4i-a10-i2s"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: should contain the I2S interrupt.
+- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
+ Documentation/devicetree/bindings/dma/dma.txt
+- dma-names: should include "tx" and "rx".
+- clocks: a list of phandle + clock-specifier pairs, one for each entry in
+  clock-names.
+- clock-names: should contain the following:
+ - "apb" : clock for the I2S bus interface
+ - "mod" : module clock for the I2S controller
+- #sound-dai-cells : Must be equal to 0
+
+Example:
+
+i2s0: i2s@01c22400 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun4i-a10-i2s";
+ reg = <0x01c22400 0x400>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&apb0_gates 3>, <&i2s0_clk>;
+ clock-names = "apb", "mod";
+ dmas = <&dma SUN4I_DMA_NORMAL 3>,
+ <&dma SUN4I_DMA_NORMAL 3>;
+ dma-names = "rx", "tx";
+};
SUNW Sun Microsystems, Inc
tbs TBS Technologies
tcl Toby Churchill Ltd.
+technexion TechNexion
technologic Technologic Systems
thine THine Electronics, Inc.
ti Texas Instruments
truly Truly Semiconductors Limited
tyan Tyan Computer Corporation
upisemi uPI Semiconductor Corp.
+uniwest United Western Technologies Corp (UniWest)
urt United Radiant Technology Corporation
usi Universal Scientific Industrial Co., Ltd.
v3 V3 Semiconductor
+Each mount of the devpts filesystem is now distinct such that ptys
+and their indices allocated in one mount are independent of ptys
+and their indices in all other mounts.
-To support containers, we now allow multiple instances of devpts filesystem,
-such that indices of ptys allocated in one instance are independent of indices
-allocated in other instances of devpts.
+All mounts of the devpts filesystem now create a /dev/pts/ptmx node
+with permissions 0000.
-To preserve backward compatibility, this support for multiple instances is
-enabled only if:
+To retain backwards compatibility, a ptmx device node (i.e. any node
+created with "mknod name c 5 2"), when opened, will look for an instance
+of devpts under the name "pts" in the same directory as the ptmx device
+node.
- - CONFIG_DEVPTS_MULTIPLE_INSTANCES=y, and
- - '-o newinstance' mount option is specified while mounting devpts
-
-IOW, devpts now supports both single-instance and multi-instance semantics.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=n, there is no change in behavior and
-this referred to as the "legacy" mode. In this mode, the new mount options
-(-o newinstance and -o ptmxmode) will be ignored with a 'bogus option' message
-on console.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and devpts is mounted without the
-'newinstance' option (as in current start-up scripts) the new mount binds
-to the initial kernel mount of devpts. This mode is referred to as the
-'single-instance' mode and the current, single-instance semantics are
-preserved, i.e PTYs are common across the system.
-
-The only difference between this single-instance mode and the legacy mode
-is the presence of new, '/dev/pts/ptmx' node with permissions 0000, which
-can safely be ignored.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and 'newinstance' option is specified,
-the mount is considered to be in the multi-instance mode and a new instance
-of the devpts fs is created. Any ptys created in this instance are independent
-of ptys in other instances of devpts. Like in the single-instance mode, the
-/dev/pts/ptmx node is present. To effectively use the multi-instance mode,
-open of /dev/ptmx must be a redirected to '/dev/pts/ptmx' using a symlink or
-bind-mount.
-
-Eg: A container startup script could do the following:
-
- $ chmod 0666 /dev/pts/ptmx
- $ rm /dev/ptmx
- $ ln -s pts/ptmx /dev/ptmx
- $ ns_exec -cm /bin/bash
-
- # We are now in new container
-
- $ umount /dev/pts
- $ mount -t devpts -o newinstance lxcpts /dev/pts
- $ sshd -p 1234
-
-where 'ns_exec -cm /bin/bash' calls clone() with CLONE_NEWNS flag and execs
-/bin/bash in the child process. A pty created by the sshd is not visible in
-the original mount of /dev/pts.
+As an option, instead of placing a ptmx device node at /dev/ptmx, it is
+possible to place a symlink to /dev/pts/ptmx at /dev/ptmx, or to bind
+mount /dev/pts/ptmx to /dev/ptmx. If you opt for using the devpts
+filesystem in this manner, devpts should be mounted with ptmxmode=0666,
+or chmod 0666 /dev/pts/ptmx should be called.
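+
+As a minimal sketch (assuming sufficient privileges; error handling
+trimmed), a container setup program could do the redirection like this:
+
+	#include <sys/mount.h>
+	#include <unistd.h>
+
+	static int setup_devpts(void)
+	{
+		/* Each devpts mount is an independent instance. */
+		if (mount("devpts", "/dev/pts", "devpts", 0,
+			  "ptmxmode=0666"))
+			return -1;
+		/* Route opens of /dev/ptmx to this instance's ptmx node. */
+		(void)unlink("/dev/ptmx");
+		if (symlink("pts/ptmx", "/dev/ptmx"))
+			return -1;
+		return 0;
+	}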
Total count of pty pairs in all instances is limited by sysctls:
kernel.pty.max = 4096 - global limit
-kernel.pty.reserve = 1024 - reserve for initial instance
+kernel.pty.reserve = 1024 - reserved for filesystems mounted from the initial mount namespace
kernel.pty.nr - current count of ptys
Per-instance limit could be set by adding mount option "max=<count>".
This feature was added in kernel 3.4 together with sysctl kernel.pty.reserve.
In kernels older than 3.4 sysctl kernel.pty.max works as per-instance limit.
-
-User-space changes
-------------------
-
-In multi-instance mode (i.e '-o newinstance' mount option is specified at least
-once), following user-space issues should be noted.
-
-1. If -o newinstance mount option is never used, /dev/pts/ptmx can be ignored
- and no change is needed to system-startup scripts.
-
-2. To effectively use multi-instance mode (i.e -o newinstance is specified)
- administrators or startup scripts should "redirect" open of /dev/ptmx to
- /dev/pts/ptmx using either a bind mount or symlink.
-
- $ mount -t devpts -o newinstance devpts /dev/pts
-
- followed by either
-
- $ rm /dev/ptmx
- $ ln -s pts/ptmx /dev/ptmx
- $ chmod 666 /dev/pts/ptmx
- or
- $ mount -o bind /dev/pts/ptmx /dev/ptmx
-
-3. The '/dev/ptmx -> pts/ptmx' symlink is the preferred method since it
- enables better error-reporting and treats both single-instance and
- multi-instance mounts similarly.
-
- But this method requires that system-startup scripts set the mode of
- /dev/pts/ptmx correctly (default mode is 0000). The scripts can set the
- mode by, either
-
- - adding ptmxmode mount option to devpts entry in /etc/fstab, or
- - using 'chmod 0666 /dev/pts/ptmx'
-
-4. If multi-instance mode mount is needed for containers, but the system
- startup scripts have not yet been updated, container-startup scripts
- should bind mount /dev/ptmx to /dev/pts/ptmx to avoid breaking single-
- instance mounts.
-
- Or, in general, container-startup scripts should use:
-
- mount -t devpts -o newinstance -o ptmxmode=0666 devpts /dev/pts
- if [ ! -L /dev/ptmx ]; then
- mount -o bind /dev/pts/ptmx /dev/ptmx
- fi
-
- When all devpts mounts are multi-instance, /dev/ptmx can permanently be
- a symlink to pts/ptmx and the bind mount can be ignored.
-
-5. A multi-instance mount that is not accompanied by the /dev/ptmx to
- /dev/pts/ptmx redirection would result in an unusable/unreachable pty.
-
- mount -t devpts -o newinstance lxcpts /dev/pts
-
- immediately followed by:
-
- open("/dev/ptmx")
-
- would create a pty, say /dev/pts/7, in the initial kernel mount.
- But /dev/pts/7 would be invisible in the new mount.
-
-6. The permissions for /dev/pts/ptmx node should be specified when mounting
- /dev/pts, using the '-o ptmxmode=%o' mount option (default is 0000).
-
- mount -t devpts -o newinstance -o ptmxmode=0644 devpts /dev/pts
-
- The permissions can be later be changed as usual with 'chmod'.
-
- chmod 666 /dev/pts/ptmx
-
-7. A mount of devpts without the 'newinstance' option results in binding to
- initial kernel mount. This behavior while preserving legacy semantics,
- does not provide strict isolation in a container environment. i.e by
- mounting devpts without the 'newinstance' option, a container could
- get visibility into the 'host' or root container's devpts.
-
- To workaround this and have strict isolation, all mounts of devpts,
- including the mount in the root container, should use the newinstance
- option.
start_comm = "swapper/2\000\000\000\000\000\000"
}
- o Dig into a radix tree data structure, such as the IRQ descriptors:
- (gdb) print (struct irq_desc)$lx_radix_tree_lookup(irq_desc_tree, 18)
- $6 = {
- irq_common_data = {
- state_use_accessors = 67584,
- handler_data = 0x0 <__vectors_start>,
- msi_desc = 0x0 <__vectors_start>,
- affinity = {{
- bits = {65535}
- }}
- },
- irq_data = {
- mask = 0,
- irq = 18,
- hwirq = 27,
- common = 0xee803d80,
- chip = 0xc0eb0854 <gic_data>,
- domain = 0xee808000,
- parent_data = 0x0 <__vectors_start>,
- chip_data = 0xc0eb0854 <gic_data>
- } <... trimmed ...>
List of commands and functions
------------------------------
address the kernel panicked.
end
+define dump_log_idx
+ set $idx = $arg0
+ if ($argc > 1)
+ set $prev_flags = $arg1
+ else
+ set $prev_flags = 0
+ end
+ set $msg = ((struct printk_log *) (log_buf + $idx))
+ set $prefix = 1
+ set $newline = 1
+ set $log = log_buf + $idx + sizeof(*$msg)
-define dmesg
- set $i = 0
- set $end_idx = (log_end - 1) & (log_buf_len - 1)
+ # prev & LOG_CONT && !(msg->flags & LOG_PREFIX)
+ if (($prev_flags & 8) && !($msg->flags & 4))
+ set $prefix = 0
+ end
+
+ # msg->flags & LOG_CONT
+ if ($msg->flags & 8)
+ # (prev & LOG_CONT && !(prev & LOG_NEWLINE))
+ if (($prev_flags & 8) && !($prev_flags & 2))
+ set $prefix = 0
+ end
+ # (!(msg->flags & LOG_NEWLINE))
+ if (!($msg->flags & 2))
+ set $newline = 0
+ end
+ end
+
+ if ($prefix)
+ printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
+ end
+ if ($msg->text_len != 0)
+ eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
+ end
+ if ($newline)
+ printf "\n"
+ end
+ if ($msg->dict_len > 0)
+ set $dict = $log + $msg->text_len
+ set $idx = 0
+ set $line = 1
+ while ($idx < $msg->dict_len)
+ if ($line)
+ printf " "
+ set $line = 0
+ end
+ set $c = $dict[$idx]
+ if ($c == '\0')
+ printf "\n"
+ set $line = 1
+ else
+ if ($c < ' ' || $c >= 127 || $c == '\\')
+ printf "\\x%02x", $c
+ else
+ printf "%c", $c
+ end
+ end
+ set $idx = $idx + 1
+ end
+ printf "\n"
+ end
+end
+document dump_log_idx
+ Dump a single log entry given its index in the log buffer. The first
+ parameter is the index into log_buf; the second is optional and
+ specifies the previous log entry's flags, used for properly
+ formatting continued lines.
+end
- while ($i < logged_chars)
- set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1)
+define dmesg
+ set $i = log_first_idx
+ set $end_idx = log_first_idx
+ set $prev_flags = 0
- if ($idx + 100 <= $end_idx) || \
- ($end_idx <= $idx && $idx + 100 < log_buf_len)
- printf "%.100s", &log_buf[$idx]
- set $i = $i + 100
+ while (1)
+ set $msg = ((struct printk_log *) (log_buf + $i))
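+ # a record length of zero marks the end of the buffer; wrap to the start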
+ if ($msg->len == 0)
+ set $i = 0
else
- printf "%c", log_buf[$idx]
- set $i = $i + 1
+ dump_log_idx $i $prev_flags
+ set $i = $i + $msg->len
+ set $prev_flags = $msg->flags
+ end
+ if ($i == $end_idx)
+ loop_break
end
end
end
however, it is better to use the API function led_blink_set(), as it
will check and implement software fallback if necessary.
-To turn off blinking again, use the API function led_brightness_set()
-as that will not just set the LED brightness but also stop any software
+To turn off blinking, use the API function led_brightness_set()
+with brightness value LED_OFF, which should stop any software
timers that may have been required for blinking.
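
For example, a minimal sketch (led_cdev is assumed to be a valid
struct led_classdev pointer obtained elsewhere):

	unsigned long delay_on = 500, delay_off = 500;

	/* Start blinking at 1 Hz; a software fallback is used if the
	 * hardware cannot blink autonomously. */
	led_blink_set(led_cdev, &delay_on, &delay_off);

	/* Later: stop any blink timers and switch the LED off. */
	led_brightness_set(led_cdev, LED_OFF);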
The blink_set() function should choose a user friendly blinking value
Switch configuration
--------------------
-- priv_size: additional size needed by the switch driver for its private context
-
- tag_protocol: this is to indicate what kind of tagging protocol is supported,
should be a valid value from the dsa_tag_protocol enum
to the switch port MDIO registers. If unavailable return a negative error
code.
-- poll_link: Function invoked by DSA to query the link state of the switch
- builtin Ethernet PHYs, per port. This function is responsible for calling
- netif_carrier_{on,off} when appropriate, and can be used to poll all ports in a
- single call. Executes from workqueue context.
-
- adjust_link: Function invoked by the PHY library when a slave network device
is attached to a PHY device. This function is responsible for appropriately
configuring the switch port link parameters: speed, duplex, pause based on
Bridge VLAN filtering
---------------------
+- port_vlan_filtering: bridge layer function invoked when the bridge gets
+ configured for turning on or off VLAN filtering. If nothing specific needs to
+ be done at the hardware level, this callback does not need to be implemented.
+ When VLAN filtering is turned on, the hardware must be programmed to
+ reject 802.1Q frames with VLAN IDs outside of the programmed allowed
+ VLAN ID map/rules. If there is no PVID programmed into the switch port,
+ untagged frames must be rejected as well. When turned off, the switch must
+ accept any 802.1Q frame irrespective of its VLAN ID, and untagged frames
+ are allowed.
+
- port_vlan_prepare: bridge layer function invoked when the bridge prepares the
configuration of a VLAN on the given port. If the operation is not supported
by the hardware, this function should return -EOPNOTSUPP to inform the bridge
shared_media - BOOLEAN
Send(router) or accept(host) RFC1620 shared media redirects.
- Overrides ip_secure_redirects.
+ Overrides secure_redirects.
shared_media for the interface will be enabled if at least one of
conf/{all,interface}/shared_media is set to TRUE,
it will be disabled otherwise
default TRUE
secure_redirects - BOOLEAN
- Accept ICMP redirect messages only for gateways,
- listed in default gateway list.
+ Accept ICMP redirect messages only to gateways listed in the
+ interface's current gateway list. Even if disabled, RFC1122 redirect
+ rules still apply.
+ Overridden by shared_media.
secure_redirects for the interface will be enabled if at least one of
conf/{all,interface}/secure_redirects is set to TRUE,
it will be disabled otherwise
3. scmd recovered
ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
- - shost->host_failed--
- clear scmd->eh_eflags
- scsi_setup_cmd_retry()
- move from local eh_work_q to local eh_done_q
LOCKING: none
+ CONCURRENCY: at most one thread per separate eh_work_q to
+ keep queue manipulation lockless
4. EH completes
ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
- layer of failure.
+ layer of failure. May be called concurrently but must have
+ no more than one thread per separate eh_work_q to
+ manipulate the queue locklessly.
- scmd is removed from eh_done_q and scmd->eh_entry is cleared
- if retry is necessary, scmd is requeued using
scsi_queue_insert()
- otherwise, scsi_finish_command() is invoked for scmd
+ - zero shost->host_failed
LOCKING: queue or finish function performs appropriate locking
(*) Compute a Diffie-Hellman shared secret or public key
long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params,
- char *buffer, size_t buflen);
+ char *buffer, size_t buflen,
+ void *reserved);
The params struct contains serial numbers for three keys:
public key. If the base is the remote public key, the result is
the shared secret.
+ The reserved argument must be set to NULL.
+
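+ As a minimal sketch (illustrative only; error handling omitted), the
+ updated call can be wrapped like this:
+
+	#include <linux/keyctl.h>
+	#include <stdint.h>
+	#include <sys/syscall.h>
+	#include <unistd.h>
+
+	static long dh_compute(int32_t priv, int32_t prime, int32_t base,
+			       char *buffer, size_t buflen)
+	{
+		struct keyctl_dh_params params = {
+			.private = priv,	/* our private key */
+			.prime	 = prime,	/* prime p */
+			.base	 = base,	/* base g, or remote public key */
+		};
+
+		/* The new fifth argument is reserved and must be NULL. */
+		return syscall(__NR_keyctl, KEYCTL_DH_COMPUTE, &params,
+			       buffer, buflen, NULL);
+	}
+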
The buffer length must be at least the length of the prime, or zero.
If the buffer length is nonzero, the length of the result is
The ASoC machine (or board) driver is the code that glues together all the
component drivers (e.g. codecs, platforms and DAIs). It also describes the
-relationships between each componnent which include audio paths, GPIOs,
+relationships between each component which include audio paths, GPIOs,
interrupts, clocking, jacks and voltage regulators.
The machine driver can contain codec and platform specific code. It registers
MPX-instrumented.
3) The kernel detects that the CPU has MPX, allows the new prctl() to
succeed, and notes the location of the bounds directory. Userspace is
- expected to keep the bounds directory at that locationWe note it
+ expected to keep the bounds directory at that location. We note it
instead of reading it each time because the 'xsave' operation needed
to access the bounds directory register is an expensive operation.
4) If the application needs to spill bounds out of the 4 registers, it
We need to decode MPX instructions to get violation address and
set this address into extended struct siginfo.
-The _sigfault feild of struct siginfo is extended as follow:
+The _sigfault field of struct siginfo is extended as follows:
87 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
88 struct {
This is allowed architecturally. See more information "Intel(R) Architecture
Instruction Set Extensions Programming Reference" (9.3.4).
-However, if users did this, the kernel might be fooled in to unmaping an
+However, if users did this, the kernel might be fooled in to unmapping an
in-use bounds table since it does not recognize sharing.
from areas other than the one we are trying to flush will be
destroyed and must be refilled later, at some cost.
2. Use the invlpg instruction to invalidate a single page at a
- time. This could potentialy cost many more instructions, but
+ time. This could potentially cost many more instructions, but
it is a much more precise operation, causing no collateral
damage to other TLB entries.
work.
3. The size of the TLB. The larger the TLB, the more collateral
damage we do with a full flush. So, the larger the TLB, the
- more attrative an individual flush looks. Data and
+ more attractive an individual flush looks. Data and
instructions have separate TLBs, as do different page sizes.
4. The microarchitecture. The TLB has become a multi-level
cache on modern CPUs, and the global flushes have become more
check_interval
How often to poll for corrected machine check errors, in seconds
- (Note output is hexademical). Default 5 minutes. When the poller
+ (Note output is hexadecimal). Default 5 minutes. When the poller
finds MCEs it triggers an exponential speedup (poll more often) on
the polling interval. When the poller stops finding MCEs, it
triggers an exponential backoff (poll less often) on the polling
L: linux-alpha@vger.kernel.org
F: arch/alpha/
+ALPS PS/2 TOUCHPAD DRIVER
+R: Pali Rohár <pali.rohar@gmail.com>
+F: drivers/input/mouse/alps.*
+
ALTERA MAILBOX DRIVER
M: Ley Foon Tan <lftan@altera.com>
L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
M: Shawn Guo <shawnguo@kernel.org>
M: Sascha Hauer <kernel@pengutronix.de>
+R: Fabio Estevam <fabio.estevam@nxp.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F: drivers/memory/samsung/*
F: drivers/soc/samsung/*
F: drivers/spi/spi-s3c*
-F: sound/soc/samsung/*
F: Documentation/arm/Samsung/
F: Documentation/devicetree/bindings/arm/samsung/
F: Documentation/devicetree/bindings/sram/samsung-sram.txt
F: drivers/edac/altera_edac.
ARM/STI ARCHITECTURE
-M: Srinivas Kandagatla <srinivas.kandagatla@gmail.com>
-M: Maxime Coquelin <maxime.coquelin@st.com>
M: Patrice Chotard <patrice.chotard@st.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kernel@stlinux.com
ARM/STM32 ARCHITECTURE
M: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+M: Alexandre Torgue <alexandre.torgue@st.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git
F: net/ax25/
AZ6007 DVB DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: fs/btrfs/
BTTV VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: net/caif/
CALGARY x86-64 IOMMU
-M: Muli Ben-Yehuda <muli@il.ibm.com>
-M: "Jon D. Mason" <jdmason@kudzu.us>
-L: discuss@x86-64.org
+M: Muli Ben-Yehuda <mulix@mulix.org>
+M: Jon Mason <jdmason@kudzu.us>
+L: iommu@lists.linux-foundation.org
S: Maintained
F: arch/x86/kernel/pci-calgary_64.c
F: arch/x86/kernel/tce_64.c
L: linux-clk@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
S: Maintained
+F: Documentation/devicetree/bindings/clock/
F: drivers/clk/
X: drivers/clk/clkdev.c
F: include/linux/clk-pr*
F: drivers/media/dvb-frontends/cx24120*
CX88 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/dma/
F: include/linux/dmaengine.h
+F: Documentation/devicetree/bindings/dma/
F: Documentation/dmaengine/
T: git git://git.infradead.org/users/vkoul/slave-dma.git
EDAC-CORE
M: Doug Thompson <dougthompson@xmission.com>
M: Borislav Petkov <bp@alien8.de>
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-edac@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next
F: drivers/edac/e7xxx_edac.c
EDAC-GHES
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/ghes_edac.c
F: drivers/edac/i5000_edac.c
EDAC-I5400
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/i5400_edac.c
EDAC-I7300
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/i7300_edac.c
EDAC-I7CORE
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/i7core_edac.c
F: drivers/edac/r82600_edac.c
EDAC-SBRIDGE
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/sb_edac.c
F: fs/efs/
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
+M: Douglas Miller <dougmill@linux.vnet.ibm.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/ibm/ehea/
EM28XX VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
KERNEL SELFTEST FRAMEWORK
M: Shuah Khan <shuahkh@osg.samsung.com>
+M: Shuah Khan <shuah@kernel.org>
L: linux-kselftest@vger.kernel.org
T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest
S: Maintained
F: drivers/hwmon/max6697.c
F: include/linux/platform_data/max6697.h
+MAX9860 MONO AUDIO VOICE CODEC DRIVER
+M: Peter Rosin <peda@axentia.se>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/max9860.txt
+F: sound/soc/codecs/max9860.*
+
MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
L: linux-pm@vger.kernel.org
F: drivers/media/pci/netup_unidvb/*
MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
P: LinuxTV.org Project
L: linux-media@vger.kernel.org
W: https://linuxtv.org
F: drivers/scsi/megaraid/
MELLANOX ETHERNET DRIVER (mlx4_en)
-M: Eugenia Emantayev <eugenia@mellanox.com>
+M: Tariq Toukan <tariqt@mellanox.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
T: git git://git.infradead.org/linux-mtd.git
T: git git://git.infradead.org/l2-mtd.git
S: Maintained
+F: Documentation/devicetree/bindings/mtd/
F: drivers/mtd/
F: include/linux/mtd/
F: include/uapi/mtd/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
S: Odd Fixes
+F: Documentation/devicetree/bindings/net/
F: drivers/net/
F: include/linux/if_*
F: include/linux/netdevice.h
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
S: Maintained
+F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Rob Herring <robh+dt@kernel.org>
M: Frank Rowand <frowand.list@gmail.com>
-M: Grant Likely <grant.likely@linaro.org>
L: devicetree@vger.kernel.org
W: http://www.devicetree.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
S: Maintained
F: drivers/of/
F: include/linux/of*.h
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh+dt@kernel.org>
-M: Pawel Moll <pawel.moll@arm.com>
M: Mark Rutland <mark.rutland@arm.com>
-M: Ian Campbell <ijc+devicetree@hellion.org.uk>
-M: Kumar Gala <galak@codeaurora.org>
L: devicetree@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
+Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/
S: Maintained
F: Documentation/devicetree/
F: arch/*/boot/dts/
L: linux-gpio@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
S: Maintained
+F: Documentation/devicetree/bindings/pinctrl/
+F: Documentation/pinctrl.txt
F: drivers/pinctrl/
F: include/linux/pinctrl/
F: drivers/media/i2c/saa6588*
SAA7134 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/platform/x86/samsung-laptop.c
SAMSUNG AUDIO (ASoC) DRIVERS
+M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
M: Sangbeom Kim <sbkim73@samsung.com>
+M: Sylwester Nawrocki <s.nawrocki@samsung.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/samsung/
F: drivers/media/radio/si4713/radio-usb-si4713.c
SIANO DVB DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://alsa-project.org/main/index.php/ASoC
S: Supported
+F: Documentation/devicetree/bindings/sound/
F: Documentation/sound/alsa/soc/
F: sound/soc/
F: include/sound/soc*
F: drivers/media/i2c/tda9840*
TEA5761 TUNER DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/media/tuners/tea5761.*
TEA5767 TUNER DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: mm/shmem.c
TM6000 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
USB OVER IP DRIVER
M: Valentina Manea <valentina.manea.m@gmail.com>
-M: Shuah Khan <shuah.kh@samsung.com>
+M: Shuah Khan <shuahkh@osg.samsung.com>
+M: Shuah Khan <shuah@kernel.org>
L: linux-usb@vger.kernel.org
S: Maintained
F: Documentation/usb/usbip_protocol.txt
W: http://www.linux-usb.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
S: Supported
+F: Documentation/devicetree/bindings/usb/
F: Documentation/usb/
F: drivers/usb/
F: include/linux/usb.h
M: "Michael S. Tsirkin" <mst@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
+F: Documentation/devicetree/bindings/virtio/
F: drivers/virtio/
F: tools/virtio/
F: drivers/net/virtio_net.c
F: arch/x86/entry/vdso/
XC2028/3028 TUNER DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
VERSION = 4
PATCHLEVEL = 7
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION =
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*
CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void $(CF)
+NOSTDINC_FLAGS =
CFLAGS_MODULE =
AFLAGS_MODULE =
LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
+LDFLAGS_vmlinux =
CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
CFLAGS_KCOV = -fsanitize-coverage=trace-pc
config ARCH_TASK_STRUCT_ALLOCATOR
bool
-# Select if arch has its private alloc_thread_info() function
-config ARCH_THREAD_INFO_ALLOCATOR
+# Select if arch has its private alloc_thread_stack() function
+config ARCH_THREAD_STACK_ALLOCATOR
bool
# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
file which provides platform-specific implementations of some
functions in <linux/hash.h> or fs/namei.c.
+config ISA_BUS_API
+ def_bool ISA
+
#
# ABI hall of shame
#
static inline pmd_t *
pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return ret;
}
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
}
def_bool y
config ARCH_DISCONTIGMEM_ENABLE
- def_bool y
+ def_bool n
config ARCH_FLATMEM_ENABLE
def_bool y
config ARC_HAS_COH_CACHES
def_bool n
-config ARC_HAS_REENTRANT_IRQ_LV2
- def_bool n
-
config ARC_MCIP
bool "ARConnect Multicore IP (MCIP) Support "
depends on ISA_ARCV2
if ISA_ARCOMPACT
config ARC_COMPACT_IRQ_LEVELS
- bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+ bool "Setup Timer IRQ as high Priority"
default n
- # Timer HAS to be high priority, for any other high priority config
- select ARC_IRQ3_LV2
# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
- depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
-
-if ARC_COMPACT_IRQ_LEVELS
-
-config ARC_IRQ3_LV2
- bool
-
-config ARC_IRQ5_LV2
- bool
-
-config ARC_IRQ6_LV2
- bool
-
-endif #ARC_COMPACT_IRQ_LEVELS
+ depends on !SMP
config ARC_FPU_SAVE_RESTORE
bool "Enable FPU state persistence across context switch"
default y
depends on !ARC_CANT_LLSC
-config ARC_STAR_9000923308
- bool "Workaround for llock/scond livelock"
- default n
- depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
-
config ARC_HAS_SWAPE
bool "Insn: SWAPE (endian-swap)"
default y
config HIGHMEM
bool "High Memory Support"
- select DISCONTIGMEM
+ select ARCH_DISCONTIGMEM_ENABLE
help
With ARC 2G:2G address split, only upper 2G is directly addressable by
kernel. Enable this to potentially allow access to rest of 2G and PAE
endif
-cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
-
# By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
ifeq ($(atleast_gcc48),y)
cflags-$(CONFIG_ARC_DW2_UNWIND) += -gdwarf-2
boot := arch/arc/boot
-#default target for make without any arguements.
+#default target for make without any arguments.
KBUILD_IMAGE := bootpImage
all: $(KBUILD_IMAGE)
/ {
- clock-frequency = <500000000>; /* 500 MHZ */
-
soc100 {
bus-frequency = <166666666>;
/ {
- clock-frequency = <500000000>; /* 500 MHZ */
-
soc100 {
bus-frequency = <166666666>;
/ {
compatible = "snps,arc";
- clock-frequency = <750000000>; /* 750 MHZ */
#address-cells = <1>;
#size-cells = <1>;
/ {
compatible = "snps,arc";
- clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;
/ {
compatible = "snps,arc";
- clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;
/ {
compatible = "ezchip,arc-nps";
- clock-frequency = <83333333>; /* 83.333333 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&intc>;
/ {
compatible = "snps,nsim";
- clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;
/ {
compatible = "snps,nsimosci";
- clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;
/ {
compatible = "snps,nsimosci_hs";
- clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;
/ {
compatible = "snps,nsimosci_hs";
- clock-frequency = <5000000>; /* 5 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;
/ {
compatible = "snps,arc";
- clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };
/ {
compatible = "snps,arc";
- clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };
/ {
compatible = "snps,arc";
- clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };
/ {
compatible = "snps,arc";
- clock-frequency = <50000000>;
#address-cells = <1>;
#size-cells = <1>;
/ {
compatible = "snps,arc";
- clock-frequency = <50000000>;
#address-cells = <1>;
#size-cells = <1>;
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF \
- unsigned int delay = 1, tmp; \
-
-#define SCOND_FAIL_RETRY_ASM \
- " bz 4f \n" \
- " ; --- scond fail delay --- \n" \
- " mov %[tmp], %[delay] \n" /* tmp = delay */ \
- "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
- " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
- " rol %[delay], %[delay] \n" /* delay *= 2 */ \
- " b 1b \n" /* start over */ \
- "4: ; --- success --- \n" \
-
-#define SCOND_FAIL_RETRY_VARS \
- ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
-
-#else /* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM \
- " bnz 1b \n" \
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
- unsigned int val; \
- SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int val; \
\
__asm__ __volatile__( \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
- " \n" \
- SCOND_FAIL_RETRY_ASM \
- \
+ " bnz 1b \n" \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
- SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
: "cc"); \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
- unsigned int val; \
- SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int val; \
\
/* \
* Explicit full memory barrier needed before/after as \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
- " \n" \
- SCOND_FAIL_RETRY_ASM \
- \
+ " bnz 1b \n" \
: [val] "=&r" (val) \
- SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
* We need to be a bit more cautious here. What if a kernel bug in
* L1 ISR, caused SP to go whacko (some small value which looks like
* USER stk) and then we take L2 ISR.
- * Above brlo alone would treat it as a valid L1-L2 sceanrio
- * instead of shouting alound
+ * Above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting around
* The only feasible way is to make sure this L2 happened in
* L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
* L1 ISR before it switches stack
local_flush_tlb_all();
/*
- * Above checke for rollover of 8 bit ASID in 32 bit container.
+ * Above check for rollover of 8 bit ASID in 32 bit container.
* If the container itself wrapped around, set it to a non zero
* "generation" to distinguish from no context
*/
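
The rollover handling the comment describes can be sketched like this
(illustrative; the MM_CTXT_* names follow the ARC convention but the
helper itself is hypothetical):

	#define MM_CTXT_ASID_MASK	0x000000ffUL	/* 8-bit hw ASID */
	#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)

	/* Sketch: bump the ASID, handling both levels of wraparound. */
	static unsigned long next_asid(unsigned long asid_cache)
	{
		asid_cache++;

		/* 8-bit ASID rolled over: new generation, stale entries die */
		if (!(asid_cache & MM_CTXT_ASID_MASK))
			local_flush_tlb_all();

		/* 32-bit container wrapped: restart at a non-zero generation */
		if (!asid_cache)
			asid_cache = MM_CTXT_FIRST_CYCLE;

		return asid_cache;
	}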
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
__get_order_pte());
return pte;
pgtable_t pte_pg;
struct page *page;
- pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+ pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
if (!pte_pg)
return 0;
memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
* Page Tables are purely for Linux VM's consumption and the bits below are
* suited to that (uniqueness). Hence some are not implemented in the TLB and
* some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
* separate PD0 and PD1, which combined form a translation entry)
* while for PTE perspective, they are 8 and 9 respectively
* with MMU v3: Most bits (except SHARED) represent the exact hardware pos
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
/*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
#define TSK_K_ESP(tsk) (tsk->thread.ksp)
* (1) These insns were introduced only in the 4.10 release, so support
* is needed for older releases.
*
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In an SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
* guaranteed by the platform (not something which the core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
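
The fallback the comment alludes to can be sketched as follows
(illustrative; the lock here is a hypothetical stand-in for the arch's
real atomic-ops lock under !CONFIG_ARC_HAS_LLSC):

	/* Sketch: atomicity via spinlock + IRQ disabling (hypothetical lock). */
	static DEFINE_RAW_SPINLOCK(atomic_fallback_lock);

	static inline void atomic_add_nollsc(int i, atomic_t *v)
	{
		unsigned long flags;

		/* IRQ-off + lock covers local preemption and other CPUs */
		raw_spin_lock_irqsave(&atomic_fallback_lock, flags);
		v->counter += i;
		raw_spin_unlock_irqrestore(&atomic_fallback_lock, flags);
	}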
#ifdef CONFIG_ARC_HAS_LLSC
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int val;
smp_mb();
}
-#else /* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF \
- unsigned int delay, tmp; \
-
-#define SCOND_FAIL_RETRY_ASM \
- " ; --- scond fail delay --- \n" \
- " mov %[tmp], %[delay] \n" /* tmp = delay */ \
- "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
- " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
- " rol %[delay], %[delay] \n" /* delay *= 2 */ \
- " b 1b \n" /* start over */ \
- " \n" \
- "4: ; --- done --- \n" \
-
-#define SCOND_FAIL_RETRY_VARS \
- ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[slock]] \n"
- " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
- " scond %[LOCKED], [%[slock]] \n" /* acquire */
- " bz 4f \n" /* done */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [slock] "r" (&(lock->slock)),
- [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[slock]] \n"
- " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
- " scond %[LOCKED], [%[slock]] \n" /* acquire */
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [slock] "r" (&(lock->slock)),
- [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- smp_mb();
-
- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
- smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- /*
- * zero means writer holds the lock exclusively, deny Reader.
- * Otherwise grant lock to first/subseq reader
- *
- * if (rw->counter > 0) {
- * rw->counter--;
- * ret = 1;
- * }
- */
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
- " sub %[val], %[val], 1 \n" /* reader lock */
- " scond %[val], [%[rwlock]] \n"
- " bz 4f \n" /* done */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
- " sub %[val], %[val], 1 \n" /* counter-- */
- " scond %[val], [%[rwlock]] \n"
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- /*
- * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
- * deny writer. Otherwise if unlocked grant to writer
- * Hence the claim that Linux rwlocks are unfair to writers.
- * (can be starved for an indefinite time by readers).
- *
- * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
- * rw->counter = 0;
- * ret = 1;
- * }
- */
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
- " mov %[val], %[WR_LOCKED] \n"
- " scond %[val], [%[rwlock]] \n"
- " bz 4f \n"
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
- " mov %[val], %[WR_LOCKED] \n"
- " scond %[val], [%[rwlock]] \n"
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- unsigned int val;
-
- smp_mb();
-
- /*
- * rw->counter++;
- */
- __asm__ __volatile__(
- "1: llock %[val], [%[rwlock]] \n"
- " add %[val], %[val], 1 \n"
- " scond %[val], [%[rwlock]] \n"
- " bnz 1b \n"
- " \n"
- : [val] "=&r" (val)
- : [rwlock] "r" (&(rw->counter))
- : "memory", "cc");
-
- smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- unsigned int val;
-
- smp_mb();
-
- /*
- * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
- */
- __asm__ __volatile__(
- "1: llock %[val], [%[rwlock]] \n"
- " scond %[UNLOCKED], [%[rwlock]]\n"
- " bnz 1b \n"
- " \n"
- : [val] "=&r" (val)
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
- : "memory", "cc");
-
- smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif /* CONFIG_ARC_STAR_9000923308 */
-
#else /* !CONFIG_ARC_HAS_LLSC */
static inline void arch_spin_lock(arch_spinlock_t *lock)
/*
* _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
* syscall, so all that remains to be tested is _TIF_WORK_MASK
*/
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
/*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want to do:
* (start < TASK_SIZE) && (start+len < TASK_SIZE)
* where TASK_SIZE could either be retrieved from thread_info->addr_limit or
* emitted directly in code.
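
The check spelled out above amounts to (a minimal sketch; overflow of
start + len is ignored here, and the real __user_ok also handles the
compile-time TASK_SIZE case):

	/* Sketch of the range check quoted above (no overflow handling). */
	static inline int user_range_ok(unsigned long start, unsigned long len,
					unsigned long task_size)
	{
		return (start < task_size) && (start + len < task_size);
	}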
__tmp ^ __in; \
})
-#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */
#define __arch_swab32(x) \
({ \
VECTOR instr_service ; 0x10, Instrn Error (0x2)
; ******************** Device ISRs **********************
-#ifdef CONFIG_ARC_IRQ3_LV2
-VECTOR handle_interrupt_level2
-#else
-VECTOR handle_interrupt_level1
-#endif
-
-VECTOR handle_interrupt_level1
-
-#ifdef CONFIG_ARC_IRQ5_LV2
-VECTOR handle_interrupt_level2
-#else
-VECTOR handle_interrupt_level1
-#endif
-
-#ifdef CONFIG_ARC_IRQ6_LV2
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
-.rept 25
+.rept 28
VECTOR handle_interrupt_level1 ; Other devices
.endr
{
int level_mask = 0;
- /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
- level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
- level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
- level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
+ /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
+ level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
/*
* Write to register, even if no LV2 IRQs configured to reset it
int64_t delta = new_raw_count - prev_raw_count;
/*
- * We don't afaraid of hwc->prev_count changing beneath our feet
+ * We aren't afraid of hwc->prev_count changing beneath our feet
* because there's no way for us to re-enter this function anytime.
*/
local64_set(&hwc->prev_count, new_raw_count);
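
The surrounding arithmetic relies on unsigned wraparound: with 64-bit
unsigned reads, new minus prev stays correct even if the raw counter
wrapped once between reads. A minimal sketch:

	/* Sketch: wrap-safe counter delta via 2's-complement arithmetic. */
	static inline int64_t counter_delta(uint64_t prev_raw, uint64_t new_raw)
	{
		return (int64_t)(new_raw - prev_raw);
	}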
/*
* If we are here, it is established that @uboot_arg didn't
* point to DT blob. Instead if u-boot says it is cmdline,
- * Appent to embedded DT cmdline.
+ * append to embedded DT cmdline.
* setup_machine_fdt() would have populated @boot_command_line
*/
if (uboot_tag == 1) {
* -ViXS were still seeing crashes when using insmod to load drivers.
* It turned out that the code to change Execute permissions for TLB entries
* of user was not guarded for interrupts (mod_tlb_permission)
- * This was cauing TLB entries to be overwritten on unrelated indexes
+ * This was causing TLB entries to be overwritten on unrelated indexes
*
* Vineetg: July 15th 2008: Bug #94183
* -Exception happens in Delay slot of a JMP, and before user space resumes,
* prologue is set up (callee regs saved and then fp set, not the other
* way around)
*/
- pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+ pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
return 0;
#endif
return 0;
}
-/* called on user read(): display the couters */
+/* called on user read(): display the counters */
static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
char __user *user_buf, /* user buffer */
size_t len, /* length of buffer */
* ------------------
* This ver of MMU supports variable page sizes (1k-16k): although Linux will
* only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggrevate aliasing
+ * However from hardware perspective, smaller page sizes aggravate aliasing
* meaning more vaddr bits needed to disambiguate the cache-line-op ;
* the existing scheme of piggybacking won't work for certain configurations.
* Two new registers IC_PTAG and DC_PTAG were introduced.
/*
* This is technically for MMU v4, using the MMU v3 programming model
- * Special work for HS38 aliasing I-cache configuratino with PAE40
+ * Special work for HS38 aliasing I-cache configuration with PAE40
* - upper 8 bits of paddr need to be written into PTAG_HI
* - (and needs to be written before the lower 32 bits)
* Note that PTAG_HI is hoisted outside the line loop
ic->ver, CONFIG_ARC_MMU_VER);
/*
- * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
+ * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
* pair to provide vaddr/paddr respectively, just as in MMU v3
*/
if (is_isa_arcv2() && ic->alias)
* DMA Coherent API Notes
*
* I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessintg it using a kernel virtual address, with
+ * implemented by accessing it using a kernel virtual address, with
* Cache bit off in the TLB entry.
*
* The default DMA address == Phy address which is 0x8000_0000 based.
sun7i-a20-olimex-som-evb.dtb \
sun7i-a20-olinuxino-lime.dtb \
sun7i-a20-olinuxino-lime2.dtb \
+ sun7i-a20-olinuxino-lime2-emmc.dtb \
sun7i-a20-olinuxino-micro.dtb \
sun7i-a20-orangepi.dtb \
sun7i-a20-orangepi-mini.dtb \
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
- clock-frequency = <400000>;
+ clock-frequency = <100000>;
tps@24 {
compatible = "ti,tps65218";
tps659038_pmic {
compatible = "ti,tps659038-pmic";
+
+ smps12-in-supply = <&vmain>;
+ smps3-in-supply = <&vmain>;
+ smps45-in-supply = <&vmain>;
+ smps6-in-supply = <&vmain>;
+ smps7-in-supply = <&vmain>;
+ smps8-in-supply = <&vmain>;
+ smps9-in-supply = <&vmain>;
+ ldo1-in-supply = <&vmain>;
+ ldo2-in-supply = <&vmain>;
+ ldo3-in-supply = <&vmain>;
+ ldo4-in-supply = <&vmain>;
+ ldo9-in-supply = <&vmain>;
+ ldoln-in-supply = <&vmain>;
+ ldousb-in-supply = <&vmain>;
+ ldortc-in-supply = <&vmain>;
+
regulators {
smps12_reg: smps12 {
/* VDD_MPU */
- vin-supply = <&vmain>;
regulator-name = "smps12";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1250000>;
smps3_reg: smps3 {
/* VDD_DDR EMIF1 EMIF2 */
- vin-supply = <&vmain>;
regulator-name = "smps3";
regulator-min-microvolt = <1350000>;
regulator-max-microvolt = <1350000>;
smps45_reg: smps45 {
/* VDD_DSPEVE on AM572 */
/* VDD_IVA + VDD_DSP on AM571 */
- vin-supply = <&vmain>;
regulator-name = "smps45";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1250000>;
smps6_reg: smps6 {
/* VDD_GPU */
- vin-supply = <&vmain>;
regulator-name = "smps6";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1250000>;
smps7_reg: smps7 {
/* VDD_CORE */
- vin-supply = <&vmain>;
regulator-name = "smps7";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1150000>;
smps8_reg: smps8 {
/* 5728 - VDD_IVAHD */
/* 5718 - N.C. test point */
- vin-supply = <&vmain>;
regulator-name = "smps8";
};
smps9_reg: smps9 {
/* VDD_3_3D */
- vin-supply = <&vmain>;
regulator-name = "smps9";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
ldo1_reg: ldo1 {
/* VDDSHV8 - VSDMMC */
/* NOTE: on rev 1.3a, data supply */
- vin-supply = <&vmain>;
regulator-name = "ldo1";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
ldo2_reg: ldo2 {
/* VDDSH18V */
- vin-supply = <&vmain>;
regulator-name = "ldo2";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
ldo3_reg: ldo3 {
/* R1.3a 572x V1_8PHY_LDO3: USB, SATA */
- vin-supply = <&vmain>;
regulator-name = "ldo3";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
ldo4_reg: ldo4 {
/* R1.3a 572x V1_8PHY_LDO4: PCIE, HDMI*/
- vin-supply = <&vmain>;
regulator-name = "ldo4";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
ldo9_reg: ldo9 {
/* VDD_RTC */
- vin-supply = <&vmain>;
regulator-name = "ldo9";
regulator-min-microvolt = <840000>;
regulator-max-microvolt = <1160000>;
ldoln_reg: ldoln {
/* VDDA_1V8_PLL */
- vin-supply = <&vmain>;
regulator-name = "ldoln";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
ldousb_reg: ldousb {
/* VDDA_3V_USB: VDDA_USBHS33 */
- vin-supply = <&vmain>;
regulator-name = "ldousb";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
ldortc_reg: ldortc {
/* VDDA_RTC */
- vin-supply = <&vmain>;
regulator-name = "ldortc";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>;
+ MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
internal-regs {
};
};
+&mmc1 {
+ status = "disabled";
+};
+
&mmc2 {
pinctrl-names = "default";
pinctrl-0 = <&sd1_pins>;
cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
};
+&mmc3 {
+ status = "disabled";
+};
+
&pincntl {
sd1_pins: pinmux_sd1_pins {
pinctrl-single,pins = <
phy-mode = "rgmii";
};
+&mmc1 {
+ status = "disabled";
+};
+
+&mmc2 {
+ status = "disabled";
+};
+
&mmc3 {
pinctrl-names = "default";
pinctrl-0 = <&sd2_pins>;
dmas = <&edma_xbar 8 0 1 /* use SDTXEVT1 instead of MCASP0TX */
&edma_xbar 9 0 2>; /* use SDRXEVT1 instead of MCASP0RX */
dma-names = "tx", "rx";
+ non-removable;
};
&pincntl {
ti,hwmods = "gpmc";
reg = <0x50000000 0x37c>; /* device IO registers */
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&edma_xbar 4 0>;
+ dma-names = "rxtx";
gpmc,num-cs = <8>;
gpmc,num-waitpins = <2>;
#address-cells = <2>;
reg = <0x58000000 0x80>,
<0x58004054 0x4>,
<0x58004300 0x20>,
- <0x58005054 0x4>,
- <0x58005300 0x20>;
+ <0x58009054 0x4>,
+ <0x58009300 0x20>;
reg-names = "dss", "pll1_clkctrl", "pll1",
"pll2_clkctrl", "pll2";
hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>;
ports {
- port0 {
+ port {
dp_out: endpoint {
remote-endpoint = <&bridge_in>;
};
edid-emulation = <5>;
ports {
- port0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
bridge_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
- port1 {
+ port@1 {
+ reg = <1>;
+
bridge_in: endpoint {
remote-endpoint = <&dp_out>;
};
hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>;
ports {
- port0 {
+ port {
dp_out: endpoint {
remote-endpoint = <&bridge_in>;
};
use-external-pwm;
ports {
- port0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
bridge_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
- port1 {
+ port@1 {
+ reg = <1>;
+
bridge_in: endpoint {
remote-endpoint = <&dp_out>;
};
OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
OMAP3_CORE1_IOPAD(0x215a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
- OMAP3_CORE1_IOPAD(0x215e, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
+ OMAP3_CORE1_IOPAD(0x215e, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
>;
vmmc-supply = <&vmmc1>;
vmmc_aux-supply = <&vsim>;
bus-width = <4>;
+ cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>;
};
&mmc3 {
OMAP3630_CORE2_IOPAD(0x25f8, PIN_OUTPUT | MUX_MODE4) /* etk_d14.gpio_28 */
>;
};
+
+ mmc1_wp_pins: pinmux_mmc1_cd_pins {
+ pinctrl-single,pins = <
+ OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT | MUX_MODE4) /* etk_d15.gpio_29 */
+ >;
+ };
};
&i2c3 {
};
};
};
+
+&mmc1 {
+ pinctrl-0 = <&mmc1_pins &mmc1_wp_pins>;
+ wp-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>; /* gpio_29 */
+};
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */
OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */
- OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+ OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */
OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */
OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */
modem_pins: pinmux_modem {
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE4) /* gpio 70 => cmt_apeslpx */
- OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio 72 => ape_rst_rq */
+ OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | MUX_MODE4) /* gpio 72 => ape_rst_rq */
OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE4) /* gpio 73 => cmt_rst_rq */
OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE4) /* gpio 74 => cmt_en */
OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE4) /* gpio 75 => cmt_rst */
OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */
OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */
OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */
- OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+ OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */
OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */
OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE1) /* ssi1_rdy_rx */
OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE7) /* ssi1_dat_tx */
OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE7) /* ssi1_flag_tx */
OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLDOWN | MUX_MODE7) /* ssi1_rdy_tx */
- OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+ OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE7) /* ssi1_dat_rx */
OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE7) /* ssi1_flag_rx */
OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE4) /* ssi1_rdy_rx */
modem_pins1: pinmux_modem_core1_pins {
pinctrl-single,pins = <
- OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio_34 (ape_rst_rq) */
+ OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | MUX_MODE4) /* gpio_34 (ape_rst_rq) */
OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE4) /* gpio_88 (cmt_rst_rq) */
OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE4) /* gpio_93 (cmt_apeslpx) */
>;
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts */
OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */
- OMAP3_CORE1_IOPAD(0x217a, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
+ OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */
>;
};
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart3_cts_rctx.uart3_cts_rctx */
OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0) /* uart3_rts_sd.uart3_rts_sd */
- OMAP3_CORE1_IOPAD(0x219e, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
+ OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */
>;
};
pinctrl-single,pins = <
OMAP3630_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_clk.sdmmc3_clk */
OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d4.sdmmc3_dat0 */
- OMAP3630_CORE2_IOPAD(0x25e6, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
+ OMAP3630_CORE2_IOPAD(0x25e6, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d6.sdmmc3_dat2 */
OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d3.sdmmc3_dat3 */
>;
display0 = &hdmi0;
};
+ vmain: fixedregulator-vmain {
+ compatible = "regulator-fixed";
+ regulator-name = "vmain";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vsys_cobra: fixedregulator-vsys_cobra {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_cobra";
+ vin-supply = <&vmain>;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vdds_1v8_main: fixedregulator-vdds_1v8_main {
+ compatible = "regulator-fixed";
+ regulator-name = "vdds_1v8_main";
+ vin-supply = <&smps7_reg>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
vmmcsd_fixed: fixedregulator-mmcsd {
compatible = "regulator-fixed";
regulator-name = "vmmcsd_fixed";
wlcore_irq_pin: pinmux_wlcore_irq_pin {
pinctrl-single,pins = <
- OMAP5_IOPAD(0x40, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
+ OMAP5_IOPAD(0x40, PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
>;
};
};
ti,ldo6-vibrator;
+ smps123-in-supply = <&vsys_cobra>;
+ smps45-in-supply = <&vsys_cobra>;
+ smps6-in-supply = <&vsys_cobra>;
+ smps7-in-supply = <&vsys_cobra>;
+ smps8-in-supply = <&vsys_cobra>;
+ smps9-in-supply = <&vsys_cobra>;
+ smps10_out2-in-supply = <&vsys_cobra>;
+ smps10_out1-in-supply = <&vsys_cobra>;
+ ldo1-in-supply = <&vsys_cobra>;
+ ldo2-in-supply = <&vsys_cobra>;
+ ldo3-in-supply = <&vdds_1v8_main>;
+ ldo4-in-supply = <&vdds_1v8_main>;
+ ldo5-in-supply = <&vsys_cobra>;
+ ldo6-in-supply = <&vdds_1v8_main>;
+ ldo7-in-supply = <&vsys_cobra>;
+ ldo8-in-supply = <&vsys_cobra>;
+ ldo9-in-supply = <&vmmcsd_fixed>;
+ ldoln-in-supply = <&vsys_cobra>;
+ ldousb-in-supply = <&vsys_cobra>;
+
regulators {
smps123_reg: smps123 {
/* VDD_OPP_MPU */
pinctrl-0 = <&twl6040_pins>;
interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
- ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>; /* gpio line 141 */
+
+ /* audpwron gpio defined in the board specific dts */
vio-supply = <&smps7_reg>;
v2v1-supply = <&smps9_reg>;
};
};
+/* LDO4 is VPP1 - ball AD9 */
+&ldo4_reg {
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+};
+
+/*
+ * LDO7 is used for HDMI: VDDA_DSIPORTA - ball AA33, VDDA_DSIPORTC - ball AE33,
+ * VDDA_HDMI - ball AN25
+ */
+&ldo7_reg {
+ status = "okay";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+};
+
&omap5_pmx_core {
i2c4_pins: pinmux_i2c4_pins {
pinctrl-single,pins = <
<&gpio7 3 0>; /* 195, SDA */
};
+&twl6040 {
+ ti,audpwron-gpio = <&gpio5 16 GPIO_ACTIVE_HIGH>; /* gpio line 144 */
+};
+
+&twl6040_pins {
+ pinctrl-single,pins = <
+ OMAP5_IOPAD(0x1c4, PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_144 */
+ OMAP5_IOPAD(0x1ca, PIN_OUTPUT | MUX_MODE6) /* perslimbus2_clock.gpio5_145 */
+ >;
+};
<&gpio9 1 GPIO_ACTIVE_HIGH>, /* TCA6424A P00, LS OE */
<&gpio7 1 GPIO_ACTIVE_HIGH>; /* GPIO 193, HPD */
};
+
+&twl6040 {
+ ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>; /* gpio line 141 */
+};
+
+&twl6040_pins {
+ pinctrl-single,pins = <
+ OMAP5_IOPAD(0x1be, PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_141 */
+ >;
+};
&gmac1 {
status = "okay";
phy-mode = "rgmii";
+ phy-handle = <&phy1>;
snps,reset-gpio = <&porta 0 GPIO_ACTIVE_LOW>;
snps,reset-active-low;
compatible = "shared-dma-pool";
reg = <0x40000000 0x01000000>;
no-map;
+ status = "disabled";
};
gp1_reserved: rproc@41000000 {
compatible = "shared-dma-pool";
reg = <0x41000000 0x01000000>;
no-map;
+ status = "disabled";
};
audio_reserved: rproc@42000000 {
compatible = "shared-dma-pool";
reg = <0x42000000 0x01000000>;
no-map;
+ status = "disabled";
};
dmu_reserved: rproc@43000000 {
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-hdmi";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
- <&ahb_gates 44>, <&dram_gates 26>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 43>, <&ahb_gates 44>,
+ <&dram_gates 26>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
- <&ahb_gates 44>, <&ahb_gates 46>,
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 43>, <&ahb_gates 44>,
+ <&ahb_gates 46>,
<&dram_gates 25>, <&dram_gates 26>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_fe0-de_be0-lcd0";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>,
- <&ahb_gates 46>, <&dram_gates 25>,
- <&dram_gates 26>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 44>, <&ahb_gates 46>,
+ <&dram_gates 25>, <&dram_gates 26>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
- clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
- <&ahb_gates 44>, <&ahb_gates 46>,
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>,
+ <&ahb_gates 36>, <&ahb_gates 44>,
+ <&ahb_gates 46>,
<&dram_gates 5>, <&dram_gates 25>, <&dram_gates 26>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-hdmi";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
- <&ahb_gates 44>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 43>, <&ahb_gates 44>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 44>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-tve0";
- clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
- <&ahb_gates 44>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>,
+ <&ahb_gates 36>, <&ahb_gates 44>;
status = "disabled";
};
};
/ {
model = "NextThing C.H.I.P.";
- compatible = "nextthing,chip", "allwinner,sun5i-r8";
+ compatible = "nextthing,chip", "allwinner,sun5i-r8", "allwinner,sun5i-a13";
aliases {
i2c0 = &i2c0;
};
®_dc1sw {
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
regulator-name = "vcc-lcd";
};
®_dc1sw {
regulator-name = "vcc-lcd-usb2";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
};
®_dc5ldo {
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-hdmi";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
- <&ahb_gates 44>, <&dram_gates 26>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 43>, <&ahb_gates 44>,
+ <&dram_gates 26>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>,
- <&dram_gates 26>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 44>, <&dram_gates 26>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-tve0";
- clocks = <&pll5 1>,
+ clocks = <&pll3>, <&pll5 1>,
<&ahb_gates 34>, <&ahb_gates 36>, <&ahb_gates 44>,
<&dram_gates 5>, <&dram_gates 26>;
status = "disabled";
pll3x2: pll3x2_clk {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
+ clocks = <&pll3>;
clock-div = <1>;
clock-mult = <2>;
clock-output-names = "pll3-2x";
pll7x2: pll7x2_clk {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
+ clocks = <&pll7>;
clock-div = <1>;
clock-mult = <2>;
clock-output-names = "pll7-2x";
ldo5_reg: ldo5 {
regulator-name = "vddio_sdmmc,avdd_vdac";
- regulator-min-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
sdhci@78000000 {
status = "okay";
+ vqmmc-supply = <&ldo5_reg>;
cd-gpios = <&gpio TEGRA_GPIO(I, 5) GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio TEGRA_GPIO(T, 3) GPIO_ACTIVE_HIGH>;
power-gpios = <&gpio TEGRA_GPIO(D, 7) GPIO_ACTIVE_HIGH>;
CONFIG_INPUT_MISC=y
CONFIG_INPUT_MAX77693_HAPTIC=y
CONFIG_INPUT_MAX8997_HAPTIC=y
+CONFIG_KEYBOARD_SAMSUNG=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_SAMSUNG=y
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
CONFIG_KEYBOARD_SPEAR=y
CONFIG_KEYBOARD_ST_KEYSCAN=y
CONFIG_KEYBOARD_CROS_EC=m
+CONFIG_KEYBOARD_SAMSUNG=m
CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_MOUSE_CYAPA=m
CONFIG_MOUSE_ELAN_I2C=y
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+ return (pmd_t *)get_zeroed_page(GFP_KERNEL);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#define pmd_large(pmd) (pmd_val(pmd) & 2)
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
+#define pmd_present(pmd) (pmd_val(pmd))
#define copy_pmd(pmdpd,pmdps) \
do { \
: !!(pmd_val(pmd) & (val)))
#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
+#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
#define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
static inline pte_t pte_mkspecial(pte_t pte)
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
+/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
- return __pmd(0);
+ return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
}
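
The point of returning a faulting-but-non-zero entry is that an
invalidated PMD must read as !pmd_present() without also reading as
pmd_none(); otherwise THP code could not tell an invalidated entry from
an empty slot. A standalone sketch of that distinction (SECT_VALID is a
stand-in bit value, not the real L_PMD_SECT_VALID):

	#include <assert.h>
	#include <stdint.h>

	#define SECT_VALID	(1u << 0)	/* stand-in for L_PMD_SECT_VALID */

	static uint32_t mknotpresent(uint32_t pmd) { return pmd & ~SECT_VALID; }

	int main(void)
	{
		uint32_t pmd = 0x1230u | SECT_VALID;	/* some live entry */
		uint32_t inv = mknotpresent(pmd);

		assert(!(inv & SECT_VALID));	/* !pmd_present(): access faults  */
		assert(inv != 0);		/* !pmd_none(): slot is not empty */
		return 0;
	}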
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_present(pmd) (pmd_val(pmd))
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
if (ret)
return ret;
- vfp_flush_hwstate(thread);
thread->vfpstate.hard = new_vfp;
+ vfp_flush_hwstate(thread);
return 0;
}
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
- trace_ipi_raise(target, ipi_types[ipinr]);
+ trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
__smp_cross_call(target, ipinr);
}
kvm_timer_vcpu_terminate(vcpu);
kvm_vgic_vcpu_destroy(vcpu);
kvm_pmu_vcpu_destroy(vcpu);
+ kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu);
}
select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
select CPU_EXYNOS4210
select GIC_NON_BANKED
- select KEYBOARD_SAMSUNG if INPUT_KEYBOARD
select MIGHT_HAVE_CACHE_L2X0
help
Samsung EXYNOS4 (Cortex-A9) SoC based systems
static void __init imx6ul_enet_phy_init(void)
{
if (IS_BUILTIN(CONFIG_PHYLIB))
- phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
+ phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
ksz8081_phy_fixup);
}
obj-$(CONFIG_MACH_MVEBU_ANY) += system-controller.o mvebu-soc-id.o
ifeq ($(CONFIG_MACH_MVEBU_V7),y)
-obj-y += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o pm.o pm-board.o
+obj-y += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o
+
+obj-$(CONFIG_PM) += pm.o pm-board.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o platsmp-a9.o headsmp-a9.o
endif
obj-$(CONFIG_MACH_DOVE) += dove.o
-obj-$(CONFIG_MACH_KIRKWOOD) += kirkwood.o kirkwood-pm.o
+
+ifeq ($(CONFIG_MACH_KIRKWOOD),y)
+obj-y += kirkwood.o
+obj-$(CONFIG_PM) += kirkwood-pm.o
+endif
}
/*
- * This ioremap hook is used on Armada 375/38x to ensure that PCIe
- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
- * is needed as a workaround for a deadlock issue between the PCIe
- * interface and the cache controller.
+ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
+ * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
+ * needed for the HW I/O coherency mechanism to work properly without
+ * deadlock.
*/
static void __iomem *
-armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
- unsigned int mtype, void *caller)
+armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
- struct resource pcie_mem;
-
- mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
-
- if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
- mtype = MT_UNCACHED;
-
+ mtype = MT_UNCACHED;
return __arm_ioremap_caller(phys_addr, size, mtype, caller);
}
struct device_node *cache_dn;
coherency_cpu_base = of_iomap(np, 0);
- arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
+ arch_ioremap_caller = armada_wa_ioremap_caller;
+ pci_ioremap_set_mem_type(MT_UNCACHED);
/*
* We should switch the PL310 to I/O coherency mode only if
#define OTHERS_MASK (MODEM_IRQ_MASK | HOOK_SWITCH_MASK)
/* IRQ handler register bitmasks */
-#define DEFERRED_FIQ_MASK (0x1 << (INT_DEFERRED_FIQ % IH2_BASE))
-#define GPIO_BANK1_MASK (0x1 << INT_GPIO_BANK1)
+#define DEFERRED_FIQ_MASK OMAP_IRQ_BIT(INT_DEFERRED_FIQ)
+#define GPIO_BANK1_MASK OMAP_IRQ_BIT(INT_GPIO_BANK1)
/* Driver buffer byte offsets */
#define BUF_MASK (FIQ_MASK * 4)
mov r8, #2 @ reset FIQ agreement
str r8, [r12, #IRQ_CONTROL_REG_OFFSET]
- cmp r10, #INT_GPIO_BANK1 @ is it GPIO bank interrupt?
+ cmp r10, #(INT_GPIO_BANK1 - NR_IRQS_LEGACY) @ is it GPIO interrupt?
beq gpio @ yes - process it
mov r8, #1
* Since no set_type() method is provided by OMAP irq chip,
* switch to edge triggered interrupt type manually.
*/
- offset = IRQ_ILR0_REG_OFFSET + INT_DEFERRED_FIQ * 0x4;
+ offset = IRQ_ILR0_REG_OFFSET +
+ ((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4;
val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);
/*
* Redirect GPIO interrupts to FIQ
*/
- offset = IRQ_ILR0_REG_OFFSET + INT_GPIO_BANK1 * 0x4;
+ offset = IRQ_ILR0_REG_OFFSET + (INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4;
val = omap_readl(OMAP_IH1_BASE + offset) | 1;
omap_writel(val, OMAP_IH1_BASE + offset);
}
#ifndef __AMS_DELTA_FIQ_H
#define __AMS_DELTA_FIQ_H
+#include <mach/irqs.h>
+
/*
* Interrupt number used for passing control from FIQ to IRQ.
* IRQ12, described as reserved, has been selected.
select PM_OPP if PM
select PM if CPU_IDLE
select SOC_HAS_OMAP2_SDRC
+ select ARM_ERRATA_430973
config ARCH_OMAP4
bool "TI OMAP4"
select PM if CPU_IDLE
select ARM_ERRATA_754322
select ARM_ERRATA_775420
+ select OMAP_INTERCONNECT
config SOC_OMAP5
bool "TI OMAP5"
select HAVE_ARM_SCU
select GENERIC_CLOCKEVENTS_BROADCAST
select HAVE_ARM_TWD
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_775420
config SOC_DRA7XX
bool "TI DRA7XX"
endif
+config OMAP5_ERRATA_801819
+ bool "Errata 801819: An eviction from L1 data cache might stall indefinitely"
+ depends on SOC_OMAP5 || SOC_DRA7XX
+ help
+ A livelock can occur in the L2 cache arbitration that might prevent
+ a snoop from completing. Under certain conditions this can cause the
+ system to deadlock.
+
endmenu
#define OMAP5_DRA7_MON_SET_CNTFRQ_INDEX 0x109
#define OMAP5_MON_AMBA_IF_INDEX 0x108
+#define OMAP5_DRA7_MON_SET_ACR_INDEX 0x107
/* Secure PPA(Primary Protected Application) APIs */
#define OMAP4_PPA_L2_POR_INDEX 0x23
return scu_base;
}
+#ifdef CONFIG_OMAP5_ERRATA_801819
+void omap5_erratum_workaround_801819(void)
+{
+ u32 acr, revidr;
+ u32 acr_mask;
+
+ /* REVIDR[3] indicates erratum fix available on silicon */
+ asm volatile ("mrc p15, 0, %0, c0, c0, 6" : "=r" (revidr));
+ if (revidr & (0x1 << 3))
+ return;
+
+ asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+ /*
+ * BIT(27) - Disables streaming. All write-allocate lines allocate in
+ * the L1 or L2 cache.
+ * BIT(25) - Disables streaming. All write-allocate lines allocate in
+ * the L1 cache.
+ */
+ acr_mask = (0x3 << 25) | (0x3 << 27);
+ /* do we already have it done.. if yes, skip expensive smc */
+ if ((acr & acr_mask) == acr_mask)
+ return;
+
+ acr |= acr_mask;
+ omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+ pr_debug("%s: ARM erratum workaround 801819 applied on CPU%d\n",
+ __func__, smp_processor_id());
+}
+#else
+static inline void omap5_erratum_workaround_801819(void) { }
+#endif
+
static void omap4_secondary_init(unsigned int cpu)
{
/*
omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
4, 0, 0, 0, 0, 0);
- /*
- * Configure the CNTFRQ register for the secondary cpu's which
- * indicates the frequency of the cpu local timers.
- */
- if (soc_is_omap54xx() || soc_is_dra7xx())
+ if (soc_is_omap54xx() || soc_is_dra7xx()) {
+ /*
+	 * Configure the CNTFRQ register, which indicates the frequency
+	 * of the CPU local timers, for the secondary CPUs.
+ */
set_cntfreq();
+ /* Configure ACR to disable streaming WA for 801819 */
+ omap5_erratum_workaround_801819();
+ }
/*
* Synchronise with the boot thread.
if (cpu_is_omap446x())
startup_addr = omap4460_secondary_startup;
+ if (soc_is_dra74x() || soc_is_omap54xx())
+ omap5_erratum_workaround_801819();
/*
* Write the address of secondary startup routine into the
trace_state = (PWRDM_TRACE_STATES_FLAG |
((next & OMAP_POWERSTATE_MASK) << 8) |
((prev & OMAP_POWERSTATE_MASK) << 0));
- trace_power_domain_target(pwrdm->name, trace_state,
- smp_processor_id());
+ trace_power_domain_target_rcuidle(pwrdm->name,
+ trace_state,
+ smp_processor_id());
}
break;
default:
if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
/* Trace the pwrdm desired target state */
- trace_power_domain_target(pwrdm->name, pwrst,
- smp_processor_id());
+ trace_power_domain_target_rcuidle(pwrdm->name, pwrst,
+ smp_processor_id());
/* Program the pwrdm desired target state */
ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
}
.prcm_offs = DRA7XX_PRM_IVA_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
.banks = 4,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* hwa_mem */
- [1] = PWRSTS_OFF_RET, /* sl2_mem */
- [2] = PWRSTS_OFF_RET, /* tcm1_mem */
- [3] = PWRSTS_OFF_RET, /* tcm2_mem */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* hwa_mem */
[1] = PWRSTS_ON, /* sl2_mem */
.prcm_offs = DRA7XX_PRM_IPU_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
.banks = 2,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* aessmem */
- [1] = PWRSTS_OFF_RET, /* periphmem */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* aessmem */
[1] = PWRSTS_ON, /* periphmem */
.prcm_offs = DRA7XX_PRM_DSS_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* dss_mem */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* dss_mem */
},
.name = "l4per_pwrdm",
.prcm_offs = DRA7XX_PRM_L4PER_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
- .pwrsts = PWRSTS_RET_ON,
- .pwrsts_logic_ret = PWRSTS_RET,
+ .pwrsts = PWRSTS_ON,
.banks = 2,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* nonretained_bank */
- [1] = PWRSTS_OFF_RET, /* retained_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* nonretained_bank */
[1] = PWRSTS_ON, /* retained_bank */
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* gpu_mem */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* gpu_mem */
},
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* wkup_bank */
},
.prcm_offs = DRA7XX_PRM_CORE_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_ON,
- .pwrsts_logic_ret = PWRSTS_RET,
.banks = 5,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* core_nret_bank */
- [1] = PWRSTS_OFF_RET, /* core_ocmram */
- [2] = PWRSTS_OFF_RET, /* core_other_bank */
- [3] = PWRSTS_OFF_RET, /* ipu_l2ram */
- [4] = PWRSTS_OFF_RET, /* ipu_unicache */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* core_nret_bank */
[1] = PWRSTS_ON, /* core_ocmram */
.prcm_offs = DRA7XX_PRM_VPE_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* vpe_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* vpe_bank */
},
.name = "l3init_pwrdm",
.prcm_offs = DRA7XX_PRM_L3INIT_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
- .pwrsts = PWRSTS_RET_ON,
- .pwrsts_logic_ret = PWRSTS_RET,
+ .pwrsts = PWRSTS_ON,
.banks = 3,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* gmac_bank */
- [1] = PWRSTS_OFF_RET, /* l3init_bank1 */
- [2] = PWRSTS_OFF_RET, /* l3init_bank2 */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* gmac_bank */
[1] = PWRSTS_ON, /* l3init_bank1 */
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* eve3_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* eve3_bank */
},
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* emu_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* emu_bank */
},
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 3,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* dsp2_edma */
- [1] = PWRSTS_OFF_RET, /* dsp2_l1 */
- [2] = PWRSTS_OFF_RET, /* dsp2_l2 */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* dsp2_edma */
[1] = PWRSTS_ON, /* dsp2_l1 */
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 3,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* dsp1_edma */
- [1] = PWRSTS_OFF_RET, /* dsp1_l1 */
- [2] = PWRSTS_OFF_RET, /* dsp1_l2 */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* dsp1_edma */
[1] = PWRSTS_ON, /* dsp1_l1 */
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* vip_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* vip_bank */
},
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* eve4_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* eve4_bank */
},
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* eve2_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* eve2_bank */
},
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* eve1_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* eve1_bank */
},
__omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
2, "timer_sys_ck", NULL, false);
- if (of_have_populated_dt())
- clocksource_probe();
+ clocksource_probe();
}
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX)
{
__omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure",
2, "timer_sys_ck", NULL, false);
+
+ clocksource_probe();
}
#endif /* CONFIG_ARCH_OMAP3 */
{
__omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
1, "timer_sys_ck", "ti,timer-alwon", true);
+
+ clocksource_probe();
}
#endif
init.name = dev_name(cpu_dev);
init.ops = &clk_spc_ops;
- init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+ init.flags = CLK_GET_RATE_NOCACHE;
init.num_parents = 0;
return devm_clk_register(cpu_dev, &spc->hw);
#include <linux/platform_data/asoc-s3c.h>
#include <linux/platform_data/spi-s3c64xx.h>
-static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
+#define samsung_device_dma_mask (*((u64[]) { DMA_BIT_MASK(32) }))
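The compound-literal form deserves a short aside: at file scope, every textual use of the macro denotes a distinct anonymous u64 with static storage duration, so each site that takes its address now gets its own writable mask object while still yielding a valid address constant. A minimal user-space illustration of the idiom (hypothetical names, not kernel code):

#include <stdio.h>

#define MASK32 (*((unsigned long long[]) { 0xffffffffULL }))

static unsigned long long *a = &MASK32;	/* one anonymous object */
static unsigned long long *b = &MASK32;	/* a second, distinct one */

int main(void)
{
	printf("%d\n", a == b);	/* prints 0: the two uses are separate objects */
	return 0;
}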
/* AC97 */
#ifdef CONFIG_CPU_S3C2440
config MMU
def_bool y
+config ARM64_PAGE_SHIFT
+ int
+ default 16 if ARM64_64K_PAGES
+ default 14 if ARM64_16K_PAGES
+ default 12
+
+config ARM64_CONT_SHIFT
+ int
+ default 5 if ARM64_64K_PAGES
+ default 7 if ARM64_16K_PAGES
+ default 4
+
config ARCH_MMAP_RND_BITS_MIN
default 14 if ARM64_64K_PAGES
default 16 if ARM64_16K_PAGES
If unsure, say Y.
+config CAVIUM_ERRATUM_23144
+ bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
+ depends on NUMA
+ default y
+ help
+	  The ITS SYNC command can hang for cross-node I/O and
+	  collection/CPU mappings.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_23154
bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
default y
who are working in architecture specific areas of the kernel.
It is probably not a good idea to enable this feature in a production
kernel.
- If in doubt, say "N"
+
+ If in doubt, say N.
config PID_IN_CONTEXTIDR
bool "Write the current PID to the CONTEXTIDR register"
value.
config DEBUG_SET_MODULE_RONX
- bool "Set loadable kernel module data as NX and text as RO"
- depends on MODULES
- help
- This option helps catch unintended modifications to loadable
- kernel module's text and read-only data. It also prevents execution
- of module data. Such protection may interfere with run-time code
- patching and dynamic kernel tracing - and they might also protect
- against certain classes of kernel exploits.
- If in doubt, say "N".
+ bool "Set loadable kernel module data as NX and text as RO"
+ depends on MODULES
+ default y
+ help
+	  If this is set, kernel module text and rodata will be made read-only.
+ This is to help catch accidental or malicious attempts to change the
+ kernel's executable code.
+
+ If in doubt, say Y.
config DEBUG_RODATA
bool "Make kernel text and rodata read-only"
is to help catch accidental or malicious attempts to change the
kernel's executable code.
- If in doubt, say Y
+ If in doubt, say Y.
config DEBUG_ALIGN_RODATA
depends on DEBUG_RODATA
alignment and potentially wasted space. Turn on this option if
performance is more important than memory pressure.
- If in doubt, say N
+ If in doubt, say N.
source "drivers/hwtracing/coresight/Kconfig"
# The byte offset of the kernel image in RAM from the start of RAM.
ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
+ int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
+ rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
else
TEXT_OFFSET := 0x00080000
endif
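Worked example: with 4 KiB pages (CONFIG_ARM64_PAGE_SHIFT = 12) the awk expression evaluates int(2*1024*1024 / 4096 * rand()) * 4096, i.e. one of 512 page-aligned offsets drawn uniformly from [0, 2 MiB); 16 KiB and 64 KiB granules simply coarsen the grid accordingly.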
Image: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-Image.%: vmlinux
+Image.%: Image
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
zinstall install:
#size-cells = <1>;
#interrupts-cells = <3>;
- compatible = "arm,amba-bus";
+ compatible = "simple-bus";
interrupt-parent = <&gic>;
ranges;
};
amba {
- compatible = "arm,amba-bus";
+ compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
ranges;
#define APM_CPU_PART_POTENZA 0x000
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
+#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
#define BRCM_CPU_PART_VULCAN 0x516
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#ifndef __ASSEMBLY__
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#endif
-#ifdef CONFIG_COMPAT
-
#ifdef __AARCH64EB__
#define COMPAT_ELF_PLATFORM ("v8b")
#else
#define COMPAT_ELF_PLATFORM ("v8l")
#endif
+#ifdef CONFIG_COMPAT
+
#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
/* AArch32 registers. */
#endif /* !__ASSEMBLY__ */
/*
- * gdb is expecting the following registers layout.
+ * gdb remote protocol (well most versions of it) expects the following
+ * register layout.
*
* General purpose regs:
* r0-r30: 64 bit
* sp,pc : 64 bit
- * pstate : 64 bit
- * Total: 34
+ * pstate : 32 bit
+ * Total: 33 + 1
* FPU regs:
* f0-f31: 128 bit
- * Total: 32
- * Extra regs
* fpsr & fpcr: 32 bit
- * Total: 2
+ * Total: 32 + 2
*
+ * To expand a little on the "most versions of it"... when the gdb remote
+ * protocol for AArch64 was developed it depended on a statement in the
+ * Architecture Reference Manual that claimed "SPSR_ELx is a 32-bit register"
+ * and, as a result, allocated only 32 bits for the PSTATE in the remote
+ * protocol. In fact this statement is still present in ARM DDI 0487A.i.
+ *
+ * Unfortunately "is a 32-bit register" has a very special meaning for
+ * system registers. It means that "the upper bits, bits[63:32], are
+ * RES0.". RES0 is heavily used in the ARM architecture documents as a
+ * way to leave space for future architecture changes. So to translate a
+ * little for people who don't spend their spare time reading ARM architecture
+ * manuals, what "is a 32-bit register" actually means in this context is
+ * "is a 64-bit register but one with no meaning allocated to any of the
+ * upper 32-bits... *yet*".
+ *
+ * Perhaps then we should not be surprised that this has led to some
+ * confusion. Specifically a patch, influenced by the above translation,
+ * that extended PSTATE to 64-bit was accepted into gdb-7.7 but the patch
+ * was reverted in gdb-7.8.1 and all later releases, when this was
+ * discovered to be an undocumented protocol change.
+ *
+ * So... it is *not* wrong for us to only allocate 32-bits to PSTATE
+ * here even though the kernel itself allocates 64-bits for the same
+ * state. That is because this bit of code tells the kernel how the gdb
+ * remote protocol (well most versions of it) describes the register state.
+ *
+ * Note that if you are using one of the versions of gdb that supports
+ * the gdb-7.7 version of the protocol you cannot use kgdb directly
+ * without providing a custom register description (gdb can load new
+ * protocol descriptions at runtime).
*/
-#define _GP_REGS 34
+#define _GP_REGS 33
#define _FP_REGS 32
-#define _EXTRA_REGS 2
+#define _EXTRA_REGS 3
/*
* general purpose registers size in bytes.
* pstate is only 4 bytes. subtract 4 bytes
#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image (top
+ * PAGE_OFFSET - the virtual address of the start of the linear map (top
* (VA_BITS - 1))
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image
* VA_BITS - the maximum number of bits for virtual addresses.
* VA_START - the first kernel virtual address.
* TASK_SIZE - the maximum size of a user space task.
/* PAGE_SHIFT determines the page size */
/* CONT_SHIFT determines the number of pages which can be tracked together */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define PAGE_SHIFT 16
-#define CONT_SHIFT 5
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define PAGE_SHIFT 14
-#define CONT_SHIFT 7
-#else
-#define PAGE_SHIFT 12
-#define CONT_SHIFT 4
-#endif
+#define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT
+#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define check_pgt_cache() do { } while (0)
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
#if CONFIG_PGTABLE_LEVELS > 2
};
u64 orig_x0;
u64 syscallno;
+ u64 orig_addr_limit;
+ u64 unused; // maintain 16 byte alignment
};
#define arch_has_single_step() (1)
cpu_park_loop();
}
+/*
+ * If a secondary CPU enters the kernel but fails to come online,
+ * (e.g. due to mismatched features), and cannot exit the kernel,
+ * we increment cpus_stuck_in_kernel and leave the CPU in a
+ * quiescent loop within the kernel text. The memory containing
+ * this loop must not be re-used for anything else as the 'stuck'
+ * core is executing it.
+ *
+ * This function is used to inhibit features like kexec and hibernate.
+ */
+bool cpus_are_stuck_in_kernel(void);
+
#endif /* ifndef __ASSEMBLY__ */
#endif /* ifndef __ASM_SMP_H */
{
unsigned int tmp;
arch_spinlock_t lockval;
+ u32 owner;
+
+ /*
+ * Ensure prior spin_lock operations to other locks have completed
+ * on this CPU before we test whether "lock" is locked.
+ */
+ smp_mb();
+ owner = READ_ONCE(lock->owner) << 16;
asm volatile(
" sevl\n"
"1: wfe\n"
"2: ldaxr %w0, %2\n"
+ /* Is the lock free? */
" eor %w1, %w0, %w0, ror #16\n"
-" cbnz %w1, 1b\n"
+" cbz %w1, 3f\n"
+ /* Lock taken -- has there been a subsequent unlock->lock transition? */
+" eor %w1, %w3, %w0, lsl #16\n"
+" cbz %w1, 1b\n"
+ /*
+ * The owner has been updated, so there was an unlock->lock
+ * transition that we missed. That means we can rely on the
+ * store-release of the unlock operation paired with the
+ * load-acquire of the lock operation to publish any of our
+ * previous stores to the new lock owner and therefore don't
+ * need to bother with the writeback below.
+ */
+" b 4f\n"
+"3:\n"
+ /*
+ * Serialise against any concurrent lockers by writing back the
+ * unlocked lock value
+ */
ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
" stxr %w1, %w0, %2\n"
-" cbnz %w1, 2b\n", /* Serialise against any concurrent lockers */
- /* LSE atomics */
" nop\n"
-" nop\n")
+" nop\n",
+ /* LSE atomics */
+" mov %w1, %w0\n"
+" cas %w0, %w0, %2\n"
+" eor %w1, %w1, %w0\n")
+	/* Somebody else wrote to the lock; go back to 2 and reload the value */
+" cbnz %w1, 2b\n"
+"4:"
: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
- :
+ : "r" (owner)
: "memory");
}
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
+ smp_mb(); /* See arch_spin_unlock_wait */
return !arch_spin_value_unlocked(READ_ONCE(*lock));
}
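To make the control flow of the asm easier to follow, here is a plain-C model of the policy it implements, assuming a ticket lock laid out as {owner, next} halfwords. This is an illustrative sketch only: it omits the barriers and the write-back of the unlocked value (the stxr/cas above) that serialises against concurrent lockers.

#include <stdint.h>

struct ticket_lock { uint16_t owner, next; };

static void unlock_wait_sketch(volatile struct ticket_lock *lock)
{
	uint16_t snap = lock->owner;	/* sample the owner once, up front */

	for (;;) {
		uint16_t owner = lock->owner;
		uint16_t next  = lock->next;

		if (owner == next)
			break;		/* lock observed free */
		if (owner != snap)
			break;		/* missed an unlock->lock handover */
	}
}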
#define segment_eq(a, b) ((a) == (b))
-/*
- * Return 1 if addr < current->addr_limit, 0 otherwise.
- */
-#define __addr_ok(addr) \
-({ \
- unsigned long flag; \
- asm("cmp %1, %0; cset %0, lo" \
- : "=&r" (flag) \
- : "r" (addr), "0" (current_thread_info()->addr_limit) \
- : "cc"); \
- flag; \
-})
-
/*
* Test whether a block of memory is a valid user space address.
* Returns 1 if the range is valid, 0 otherwise.
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 390
+#define __NR_compat_syscalls 394
#endif
#define __ARCH_WANT_SYS_CLONE
__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
#define __NR_membarrier 389
__SYSCALL(__NR_membarrier, sys_membarrier)
+#define __NR_mlock2 390
+__SYSCALL(__NR_mlock2, sys_mlock2)
+#define __NR_copy_file_range 391
+__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 392
+__SYSCALL(__NR_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 393
+__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
/*
* Please add new compat syscalls above this comment and update
DEFINE(S_PC, offsetof(struct pt_regs, pc));
DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
+ DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
MIDR_RANGE(MIDR_THUNDERX, 0x00,
(1 << MIDR_VARIANT_SHIFT) | 1),
},
+ {
+ /* Cavium ThunderX, T81 pass 1.0 */
+ .desc = "Cavium erratum 27456",
+ .capability = ARM64_WORKAROUND_CAVIUM_27456,
+ MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
+ },
#endif
{
}
#include <linux/bitops.h>
#include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/personality.h>
static int c_show(struct seq_file *m, void *v)
{
int i, j;
+ bool compat = personality(current->personality) == PER_LINUX32;
for_each_online_cpu(i) {
struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
* "processor". Give glibc what it expects.
*/
seq_printf(m, "processor\t: %d\n", i);
+ if (compat)
+ seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
+ MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
loops_per_jiffy / (500000UL/HZ),
* software which does already (at least for 32-bit).
*/
seq_puts(m, "Features\t:");
- if (personality(current->personality) == PER_LINUX32) {
+ if (compat) {
#ifdef CONFIG_COMPAT
for (j = 0; compat_hwcap_str[j]; j++)
if (compat_elf_hwcap & (1 << j))
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
+#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
- .endif
+ get_thread_info tsk
+ /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+ ldr x20, [tsk, #TI_ADDR_LIMIT]
+ str x20, [sp, #S_ORIG_ADDR_LIMIT]
+ mov x20, #TASK_SIZE_64
+ str x20, [tsk, #TI_ADDR_LIMIT]
+ ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
+ .endif /* \el == 0 */
mrs x22, elr_el1
mrs x23, spsr_el1
stp lr, x21, [sp, #S_LR]
.endm
.macro kernel_exit, el
+ .if \el != 0
+ /* Restore the task's original addr_limit. */
+ ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
+ str x20, [tsk, #TI_ADDR_LIMIT]
+
+ /* No need to restore UAO, it will be restored from SPSR_EL1 */
+ .endif
+
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
bl trace_hardirqs_off
#endif
- get_thread_info tsk
irq_handler
#ifdef CONFIG_PREEMPT
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
+#include <asm/smp.h>
#include <asm/suspend.h>
#include <asm/virt.h>
unsigned long flags;
struct sleep_stack_data state;
+ if (cpus_are_stuck_in_kernel()) {
+ pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
+ return -EBUSY;
+ }
+
local_dbg_save(flags);
if (__cpu_suspend_enter(&state)) {
{ "x30", 8, offsetof(struct pt_regs, regs[30])},
{ "sp", 8, offsetof(struct pt_regs, sp)},
{ "pc", 8, offsetof(struct pt_regs, pc)},
- { "pstate", 8, offsetof(struct pt_regs, pstate)},
+ /*
+ * struct pt_regs thinks PSTATE is 64-bits wide but gdb remote
+ * protocol disagrees. Therefore we must extract only the lower
+ * 32-bits. Look for the big comment in asm/kgdb.h for more
+ * detail.
+ */
+ { "pstate", 4, offsetof(struct pt_regs, pstate)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ + 4
+#endif
+ },
{ "v0", 16, -1 },
{ "v1", 16, -1 },
{ "v2", 16, -1 },
memset((char *)gdb_regs, 0, NUMREGBYTES);
thread_regs = task_pt_regs(task);
memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
+ /* Special case for PSTATE (check comments in asm/kgdb.h for details) */
+ dbg_get_reg(33, gdb_regs + GP_REG_BYTES, thread_regs);
}
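In other words, the memcpy() bulk-copies the 64-bit general-purpose registers and the 32-bit pstate is then appended separately via dbg_get_reg(33, ...); on a big-endian kernel the significant word of the in-kernel 64-bit pstate field sits 4 bytes in, which is what the #ifdef CONFIG_CPU_BIG_ENDIAN offset in the table above accounts for.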
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
return -EINVAL;
}
+
+static bool have_cpu_die(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ int any_cpu = raw_smp_processor_id();
+
+ if (cpu_ops[any_cpu]->cpu_die)
+ return true;
+#endif
+ return false;
+}
+
+bool cpus_are_stuck_in_kernel(void)
+{
+ bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
+
+ return !!cpus_stuck_in_kernel || smp_spin_tables;
+}
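The spin-table case counts as "stuck" because CPUs brought up via spin-tables have no cpu_die method and therefore can never be taken offline again, which is precisely the property the hibernate hunk above needs to rule out.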
/*
* We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
+ * to safely read from kernel space.
*/
fs = get_fs();
set_fs(KERNEL_DS);
print_ip_sym(where);
}
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
- mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
- /*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
- */
- fs = get_fs();
- set_fs(KERNEL_DS);
-
for (i = -4; i < 1; i++) {
unsigned int val, bad;
}
}
printk("%sCode: %s\n", lvl, str);
+}
- set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+ if (!user_mode(regs)) {
+ mm_segment_t fs = get_fs();
+ set_fs(KERNEL_DS);
+ __dump_instr(lvl, regs);
+ set_fs(fs);
+ } else {
+ __dump_instr(lvl, regs);
+ }
}
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
- pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
- handler[reason], esr, esr_get_class_string(esr));
+ pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
+ handler[reason], smp_processor_id(), esr,
+ esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL;
* Make sure stores to the GIC via the memory mapped interface
* are now visible to the system register interface.
*/
- dsb(st);
+ if (!cpu_if->vgic_sre)
+ dsb(st);
cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
continue;
- if (cpu_if->vgic_elrsr & (1 << i)) {
+ if (cpu_if->vgic_elrsr & (1 << i))
cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
- continue;
- }
+ else
+ cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
- cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
__gic_v3_set_lr(0, i);
}
val = read_gicreg(ICC_SRE_EL2);
write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
- isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
- write_gicreg(1, ICC_SRE_EL1);
+
+ if (!cpu_if->vgic_sre) {
+ /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
+ isb();
+ write_gicreg(1, ICC_SRE_EL1);
+ }
}
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
* been actually programmed with the value we want before
* starting to mess with the rest of the GIC.
*/
- write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
- isb();
+ if (!cpu_if->vgic_sre) {
+ write_gicreg(0, ICC_SRE_EL1);
+ isb();
+ }
val = read_gicreg(ICH_VTR_EL2);
max_lr_idx = vtr_to_max_lr_idx(val);
* (re)distributors. This ensure the guest will read the
* correct values from the memory-mapped interface.
*/
- isb();
- dsb(sy);
+ if (!cpu_if->vgic_sre) {
+ isb();
+ dsb(sy);
+ }
vcpu->arch.vgic_cpu.live_lrs = live_lrs;
/*
* Prevent the guest from touching the GIC system registers if
* SRE isn't enabled for GICv3 emulation.
*/
- if (!cpu_if->vgic_sre) {
- write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
- ICC_SRE_EL2);
- }
+ write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+ ICC_SRE_EL2);
}
void __hyp_text __vgic_v3_init_lrs(void)
return true;
}
+static bool access_gic_sre(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+
+ p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+ return true;
+}
+
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
access_gic_sgi },
/* ICC_SRE_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
- trap_raz_wi },
+ access_gic_sre },
/* CONTEXTIDR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
&asid_generation);
flush_context(cpu);
- /* We have at least 1 ASID per CPU, so this will always succeed */
+ /* We have more ASIDs than CPUs, so this will always succeed */
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
set_asid:
static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
- /* If we end up with more CPUs than ASIDs, expect things to crash */
- WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
+ /*
+ * Expect allocation after rollover to fail if we don't have at least
+ * one more ASID than CPUs. ASID #0 is reserved for init_mm.
+ */
+ WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
GFP_KERNEL);
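Worked example: with 8-bit ASIDs, NUM_USER_ASIDS is 256 and ASID #0 is reserved for init_mm, leaving 255 usable; the WARN_ON therefore fires once num_possible_cpus() reaches 255, the point at which a rollover could find every remaining ASID pinned by a running CPU.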
struct pg_level {
const struct prot_bits *bits;
+ const char *name;
size_t num;
u64 mask;
};
static struct pg_level pg_level[] = {
{
}, { /* pgd */
+ .name = "PGD",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
}, { /* pud */
+ .name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
}, { /* pmd */
+ .name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
}, { /* pte */
+ .name = "PTE",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
},
delta >>= 10;
unit++;
}
- seq_printf(st->seq, "%9lu%c", delta, *unit);
+ seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+ pg_level[st->level].name);
if (pg_level[st->level].bits)
dump_prot(st, pg_level[st->level].bits,
pg_level[st->level].num);
 * PTE_RDONLY is cleared by default in the asm below, so set it
 * back if necessary (read-only or clean PTE).
*/
- if (!pte_write(entry) || !dirty)
+ if (!pte_write(entry) || !pte_sw_dirty(entry))
pte_val(entry) |= PTE_RDONLY;
/*
}
if (permission_fault(esr) && (addr < USER_DS)) {
- if (get_fs() == KERNEL_DS)
+ /* regs->orig_addr_limit may be 0 if we entered from EL0 */
+ if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
if (!search_exception_tables(regs->pc))
return 1;
}
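This pairs with the entry.S hunk above: the faulting context's addr_limit is snapshotted into pt_regs on kernel entry (after which the entry code forces USER_DS), so the KERNEL_DS check here keys off what the task had at exception time rather than the already-reset live value.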
-static struct fault_info {
+static const struct fault_info {
int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
int sig;
int code;
{
struct page *page = pte_page(pte);
- /* no flushing needed for anonymous pages */
- if (!page_mapping(page))
- return;
-
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
sync_icache_aliases(page_address(page),
PAGE_SIZE << compound_order(page));
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+ } else if (ps == (PAGE_SIZE * CONT_PTES)) {
+ hugetlb_add_hstate(CONT_PTE_SHIFT);
+ } else if (ps == (PMD_SIZE * CONT_PMDS)) {
+ hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
} else {
hugetlb_bad_size();
pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
+
+#ifdef CONFIG_ARM64_64K_PAGES
+static __init int add_default_hugepagesz(void)
+{
+ if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
+ hugetlb_add_hstate(CONT_PMD_SHIFT);
+ return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
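Concretely, on a 64 KiB-granule kernel this adds 2 MiB hugepages (CONT_PTES = 32 contiguous PTEs) and 16 GiB hugepages (32 contiguous 512 MiB PMDs) alongside the existing PMD size, and the initcall registers the 2 MiB size by default when the command line has not already done so.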
*/
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
+ return quicklist_alloc(QUICK_PGD, GFP_KERNEL, pgd_ctor);
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
struct page *page;
void *pg;
- pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
if (!pg)
return NULL;
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+ pte = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
if (!pte)
return NULL;
if (!pgtable_page_ctor(pte)) {
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
if (pte)
clear_page(pte);
return pte;
struct page *page;
#ifdef CONFIG_HIGHPTE
- page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+ page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
#else
- page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ page = alloc_pages(GFP_KERNEL, 0);
#endif
if (!page)
return NULL;
{
struct page *pte;
- pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+ pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!pte)
return NULL;
if (!pgtable_page_ctor(pte)) {
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+ gfp_t flags = GFP_KERNEL | __GFP_ZERO;
return (pte_t *) __get_free_page(flags);
}
select GENERIC_SMP_IDLE_THREAD
select ARCH_INIT_TASK
select ARCH_TASK_STRUCT_ALLOCATOR
- select ARCH_THREAD_INFO_ALLOCATOR
+ select ARCH_THREAD_STACK_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
select GENERIC_TIME_VSYSCALL_OLD
select SYSCTL_ARCH_UNALIGN_NO_WARN
#ifndef ASM_OFFSETS_C
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_info_node(tsk, node) \
- ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define alloc_thread_stack_node(tsk, node) \
+ ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#else
#define current_thread_info() ((struct thread_info *) 0)
-#define alloc_thread_info_node(tsk, node) ((struct thread_info *) 0)
+#define alloc_thread_stack_node(tsk, node) ((unsigned long *) 0)
#define task_thread_info(tsk) ((struct thread_info *) 0)
#endif
-#define free_thread_info(ti) /* nothing */
+#define free_thread_stack(ti) /* nothing */
#define task_stack_page(tsk) ((void *)(tsk))
#define __HAVE_THREAD_FUNCTIONS
* handled. This is done by having a special ".data..init_task" section...
*/
#define init_thread_info init_task_mem.s.thread_info
+#define init_stack init_task_mem.stack
union {
struct {
#include <asm/processor.h>
-static void putc(char c);
+static void m32r_putc(char c);
static int puts(const char *s)
{
char c;
- while ((c = *s++)) putc(c);
+ while ((c = *s++))
+ m32r_putc(c);
return 0;
}
#define BOOT_SIO0TXB PLD_ESIO0TXB
#endif
-static void putc(char c)
+static void m32r_putc(char c)
{
while ((*BOOT_SIO0STS & 0x3) != 0x3)
cpu_relax();
#define SIO0TXB (volatile unsigned short *)(0x00efd000 + 30)
#endif
-static void putc(char c)
+static void m32r_putc(char c)
{
while ((*SIO0STS & 0x1) == 0)
cpu_relax();
#endif
/*
- * Assember start up done, start code proper.
+ * Assembler start up done, start code proper.
*/
jsr start_kernel /* start Linux kernel */
/***************************************************************************/
/*
- * Some 5272 based boards have the FEC ethernet diectly connected to
+ * Some 5272 based boards have the FEC ethernet directly connected to
* an ethernet switch. In this case we need to use the fixed phy type,
* and we need to declare it early in boot.
*/
/*
* We need to be carefull probing on bus 0 (directly connected to host
- * bridge). We should only acccess the well defined possible devices in
+ * bridge). We should only access the well defined possible devices in
* use, ignore aliases and the like.
*/
static unsigned char mcf_host_slot2sid[32] = {
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
xdnrm_sd:
mov.l %a1,-(%sp)
tst.b LOCAL_EX(%a0) # is denorm pos or neg?
- smi.b %d1 # set d0 accodingly
+ smi.b %d1 # set d0 accordingly
bsr.l unf_sub
mov.l (%sp)+,%a1
xdnrm_exit:
# routines where an instruction is selected by an index into
# a large jump table corresponding to a given instruction which
# has been decoded. Flow continues here where we now decode
-# further accoding to the source operand type.
+# further according to the source operand type.
#
global fsinh
#
# 1. Branch on the sign of the adjusted exponent.
# 2p.(positive exp)
-# 2. Check M16 and the digits in lwords 2 and 3 in decending order.
+# 2. Check M16 and the digits in lwords 2 and 3 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Subtract the count from the exp.
# 5. Check if the exp has crossed zero in #3 above; make the exp abs
# and set SE.
# 6. Multiply the mantissa by 10**count.
# 2n.(negative exp)
-# 2. Check the digits in lwords 3 and 2 in decending order.
+# 2. Check the digits in lwords 3 and 2 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Add the count to the exp.
# 5. Check if the exp has crossed zero in #3 above; clear SE.
#
# 1. Branch on the sign of the adjusted exponent.
# 2p.(positive exp)
-# 2. Check M16 and the digits in lwords 2 and 3 in decending order.
+# 2. Check M16 and the digits in lwords 2 and 3 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Subtract the count from the exp.
# 5. Check if the exp has crossed zero in #3 above; make the exp abs
# and set SE.
# 6. Multiply the mantissa by 10**count.
# 2n.(negative exp)
-# 2. Check the digits in lwords 3 and 2 in decending order.
+# 2. Check the digits in lwords 3 and 2 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Add the count to the exp.
# 5. Check if the exp has crossed zero in #3 above; clear SE.
* AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
* Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
*
- * AUG/25/2000 : addad support for 8, 16 and 32-bit Single-Address-Mode (K)2000
+ * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
* Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
*
* APR/18/2002 : added proper support for MCF5272 DMA controller.
/*
* I2C module.
*/
-#define MCFI2C_BASE0 (MCF_MBAR + 0x280) /* Base addreess I2C0 */
+#define MCFI2C_BASE0 (MCF_MBAR + 0x280) /* Base address I2C0 */
#define MCFI2C_SIZE0 0x20 /* Register set size */
-#define MCFI2C_BASE1 (MCF_MBAR2 + 0x440) /* Base addreess I2C1 */
+#define MCFI2C_BASE1 (MCF_MBAR2 + 0x440) /* Base address I2C1 */
#define MCFI2C_SIZE1 0x20 /* Register set size */
/*
extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
+ unsigned long page = __get_free_page(GFP_DMA);
if (!page)
return NULL;
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
+ struct page *page = alloc_pages(GFP_DMA, 0);
pte_t *pte;
if (!page)
/*
* MMU Operation register.
*/
-#define MMUOR_UAA 0x00000001 /* Update allocatiom address */
+#define MMUOR_UAA 0x00000001 /* Update allocation address */
#define MMUOR_ACC 0x00000002 /* TLB access */
#define MMUOR_RD 0x00000004 /* TLB access read */
#define MMUOR_WR 0x00000000 /* TLB access write */
{
pte_t *pte;
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
if (pte) {
__flush_page_to_ram(pte);
flush_tlb_kernel_page(pte);
struct page *page;
pte_t *pte;
- page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+ page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
if(!page)
return NULL;
if (!pgtable_page_ctor(page)) {
/*
* Q40 master Chip Control
- * RTC stuff merged for compactnes..
+ * RTC stuff merged for compactness.
*/
#ifndef _Q40_MASTER_H
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ unsigned long page = __get_free_page(GFP_KERNEL);
if (!page)
return NULL;
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ struct page *page = alloc_pages(GFP_KERNEL, 0);
if (page == NULL)
return NULL;
*
* The host talks to the IOPs using a rather simple message-passing scheme via
* a shared memory area in the IOP RAM. Each IOP has seven "channels"; each
- * channel is conneced to a specific software driver on the IOP. For example
+ * channel is connected to a specific software driver on the IOP. For example
* on the SCC IOP there is one channel for each serial port. Each channel has
 * an incoming and an outgoing message queue with a depth of one.
*
bfextu %d2{#13,#3},%d0
.endm
-| decode the 8bit diplacement from the brief extension word
+| decode the 8bit displacement from the brief extension word
.macro fp_decode_disp8
move.b %d2,%d0
ext.w %d0
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT |
- __GFP_ZERO);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
return pte;
}
unsigned long address)
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+ pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
if (!pte)
return NULL;
if (!pgtable_page_ctor(pte)) {
struct page *ptepage;
#ifdef CONFIG_HIGHPTE
- int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+ int flags = GFP_KERNEL | __GFP_HIGHMEM;
#else
- int flags = GFP_KERNEL | __GFP_REPEAT;
+ int flags = GFP_KERNEL;
#endif
ptepage = alloc_pages(flags, 0);
{
pte_t *pte;
if (mem_init_done) {
- pte = (pte_t *)__get_free_page(GFP_KERNEL |
- __GFP_REPEAT | __GFP_ZERO);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
} else {
pte = (pte_t *)early_get_page();
if (pte)
#define KVM_GUEST_KUSEG 0x00000000UL
#define KVM_GUEST_KSEG0 0x40000000UL
#define KVM_GUEST_KSEG23 0x60000000UL
-#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000)
#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
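Worked example for the mask widening: a guest address such as 0xc0000000 masked with the old two-bit 0x60000000 yields 0x40000000 and is misclassified as KSEG0, whereas the full three-segment-bit mask 0xe0000000 preserves it as 0xc0000000, distinct from both KSEG0 (0x40000000) and KSEG23 (0x60000000).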
#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
void *host_ebase, *guest_ebase;
+ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
unsigned long host_stack;
unsigned long host_gp;
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
return pte;
}
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+ pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
if (!pte)
return NULL;
clear_highpage(pte);
{
pmd_t *pmd;
- pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
+ pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
if (pmd)
pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
return pmd;
struct vm_area_struct;
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
- _CACHE_CACHABLE_NONCOHERENT)
+ _page_cachable_default)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
_page_cachable_default)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
- pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
+ pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+ (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
- pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+ pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
+ (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
return pmd;
}
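The point of masking with ~_PAGE_CHG_MASK is that bits pte_modify() preserves from the old PTE are no longer clobbered when the incoming pgprot also carries defaults for them; note that PAGE_NONE above now uses _page_cachable_default, so every pgprot carries cache-mode bits that could otherwise override a non-default cache mode already set in the PTE. pmd_modify() additionally keeps _PAGE_HUGE so a huge mapping stays huge.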
if (index < 0) {
vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
vcpu->arch.host_cp0_badvaddr = va;
+ vcpu->arch.pc = curr_pc;
er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
vcpu);
preempt_enable();
* invalid exception to the guest
*/
if (!TLB_IS_VALID(*tlb, va)) {
+ vcpu->arch.host_cp0_badvaddr = va;
+ vcpu->arch.pc = curr_pc;
er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
run, vcpu);
preempt_enable();
cache, op, base, arch->gprs[base], offset);
er = EMULATE_FAIL;
preempt_enable();
- goto dont_update_pc;
+ goto done;
}
kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
cache, op, base, arch->gprs[base], offset);
er = EMULATE_FAIL;
- preempt_enable();
- goto dont_update_pc;
}
preempt_enable();
+done:
+ /* Rollback PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL)
+ vcpu->arch.pc = curr_pc;
dont_update_pc:
- /* Rollback PC */
- vcpu->arch.pc = curr_pc;
-done:
+ /*
+ * This is for exceptions whose emulation updates the PC, so do not
+ * overwrite the PC under any circumstances
+ */
+
return er;
}
#define MIPS_EXC_MAX 12
/* XXXSL More to follow */
+extern char __kvm_mips_vcpu_run_end[];
extern char mips32_exception[], mips32_exceptionEnd[];
extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
/* Jump to guest */
eret
+EXPORT(__kvm_mips_vcpu_run_end)
VECTOR(MIPSX(exception), unknown)
/* Find out what mode we came from and jump to the proper handler. */
memcpy(gebase + offset, mips32_GuestException,
mips32_GuestExceptionEnd - mips32_GuestException);
+#ifdef MODULE
+ offset += mips32_GuestExceptionEnd - mips32_GuestException;
+ memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
+ __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
+ vcpu->arch.vcpu_run = gebase + offset;
+#else
+ vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
+#endif
+
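The MODULE branch exists because module text lives in TLB-mapped kernel virtual space: copying __kvm_mips_vcpu_run() into the unmapped gebase region, next to the exception stubs copied just above, ensures no host TLB refill can hit inside the guest-entry path, while built-in kernels keep calling the original unmapped-KSEG0 function through the same vcpu_run pointer.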
/* Invalidate the icache for these ranges */
local_flush_icache_range((unsigned long)gebase,
(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
/* Disable hardware page table walking while in guest */
htw_stop();
- r = __kvm_mips_vcpu_run(run, vcpu);
+ r = vcpu->arch.vcpu_run(run, vcpu);
/* Re-enable HTW before enabling interrupts */
htw_start();
}
#ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti);
+void arch_release_thread_stack(unsigned long *stack);
#endif
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
* single-step state is cleared. At this point the breakpoints should have
* been removed by __switch_to().
*/
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_stack(unsigned long *stack)
{
+ struct thread_info *ti = (void *)stack;
if (kgdb_sstep_thread == ti) {
kgdb_sstep_thread = NULL;
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
if (pte)
clear_page(pte);
return pte;
struct page *pte;
#ifdef CONFIG_HIGHPTE
- pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+ pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
#else
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ pte = alloc_pages(GFP_KERNEL, 0);
#endif
if (!pte)
return NULL;
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
- PTE_ORDER);
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
return pte;
}
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+ pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
if (pte) {
if (!pgtable_page_ctor(pte)) {
__free_page(pte);
unsigned long address)
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ pte = alloc_pages(GFP_KERNEL, 0);
if (!pte)
return NULL;
clear_page(page_address(pte));
pte_t *pte;
if (likely(mem_init_done)) {
- pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
} else {
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
#if 0
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
- PMD_ORDER);
+ pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
if (pmd)
memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
return pmd;
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!page)
return NULL;
if (!pgtable_page_ctor(page)) {
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
}
void parisc_terminate(char *msg, struct pt_regs *regs,
int code, unsigned long offset) __noreturn __cold;
+void die_if_kernel(char *str, struct pt_regs *regs, long err);
+
/* mm/fault.c */
void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long address);
per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
- printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
- cpunum, coproc_cfg.revision, coproc_cfg.model);
+ if (cpunum == 0)
+ printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
+ cpunum, coproc_cfg.revision, coproc_cfg.model);
/*
** store status register to stack (hopefully aligned)
clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
NSEC_PER_MSEC, 0);
-#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
- /* At bootup only one 64bit CPU is online and cr16 is "stable" */
- set_sched_clock_stable();
-#endif
-
start_cpu_itimer(); /* get CPU 0 started */
/* register at clocksource framework */
#include <linux/ratelimit.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
+#include <asm/traps.h>
/* #define DEBUG_UNALIGNED 1 */
int unaligned_enabled __read_mostly = 1;
-void die_if_kernel (char *str, struct pt_regs *regs, long err);
-
static int emulate_ldh(struct pt_regs *regs, int toreg)
{
unsigned long saddr = regs->ior;
break;
}
- if (modify && R1(regs->iir))
+ if (ret == 0 && modify && R1(regs->iir))
regs->gr[R1(regs->iir)] = newbase;
if (ret)
{
+ /*
+ * The unaligned handler failed.
+	 * If we were called by __get_user() or __put_user(), jump
+	 * to its exception fixup handler instead of crashing.
+ */
+ if (!user_mode(regs) && fixup_exception(regs))
+ return;
+
printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
die_if_kernel("Unaligned data reference", regs, 28);
if (addr >= kernel_unwind_table.start &&
addr <= kernel_unwind_table.end)
e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
- else
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&unwind_lock, flags);
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->start &&
addr <= table->end)
break;
}
}
+ spin_unlock_irqrestore(&unwind_lock, flags);
+ }
return e;
}
insn = *(unsigned int *)npc;
- if ((insn & 0xffffc000) == 0x37de0000 ||
- (insn & 0xffe00000) == 0x6fc00000) {
+ if ((insn & 0xffffc001) == 0x37de0000 ||
+ (insn & 0xffe00001) == 0x6fc00000) {
/* ldo X(sp), sp, or stwm X,D(sp) */
- frame_size += (insn & 0x1 ? -1 << 13 : 0) |
- ((insn & 0x3fff) >> 1);
+ frame_size += (insn & 0x3fff) >> 1;
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
- } else if ((insn & 0xffe00008) == 0x73c00008) {
+ } else if ((insn & 0xffe00009) == 0x73c00008) {
/* std,ma X,D(sp) */
- frame_size += (insn & 0x1 ? -1 << 13 : 0) |
- (((insn >> 4) & 0x3ff) << 3);
+ frame_size += ((insn >> 4) & 0x3ff) << 3;
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
}
}
+ if (frame_size > e->Total_frame_size << 3)
+ frame_size = e->Total_frame_size << 3;
+
if (!unwind_special(info, e->region_start, frame_size)) {
info->prev_sp = info->sp - frame_size;
if (e->Millicode)
select IRQ_FORCED_THREADING
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_CBPF_JIT
+ select HAVE_CBPF_JIT if CPU_BIG_ENDIAN
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_HAS_GCOV_PROFILE_ALL
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- tlb_flush_pgtable(tlb, address);
pgtable_page_dtor(table);
pgtable_free_tlb(tlb, page_address(table), 0);
}
#define HPTE_R_RPN_SHIFT 12
#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
+#define HPTE_R_PPP ASM_CONST(0x8000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
#define HPTE_R_G ASM_CONST(0x0000000000000008)
#define HPTE_R_M ASM_CONST(0x0000000000000010)
pgtable_cache[(shift) - 1]; \
})
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern void pte_fragment_free(unsigned long *, int);
return (pgd_t *)__get_free_page(PGALLOC_GFP);
#else
struct page *page;
- page = alloc_pages(PGALLOC_GFP, 4);
+ page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
if (!page)
return NULL;
return (pgd_t *) page_address(page);
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
+ /*
+ * By now all the pud entries should be none entries. So go
+ * ahead and flush the page walk cache
+ */
+ flush_tlb_pgtable(tlb, address);
pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
+ /*
+	 * By now all the pmd entries should be none entries. So go
+ * ahead and flush the page walk cache
+ */
+ flush_tlb_pgtable(tlb, address);
return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+ return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- tlb_flush_pgtable(tlb, address);
+ /*
+	 * By now all the pte entries should be none entries. So go
+ * ahead and flush the page walk cache
+ */
+ flush_tlb_pgtable(tlb, address);
pgtable_free_tlb(tlb, table, 0);
}
#define KERN_VIRT_SIZE __kernel_virt_size
extern struct page *vmemmap;
extern unsigned long ioremap_bot;
+extern unsigned long pci_io_base;
#endif /* __ASSEMBLY__ */
#include <asm/book3s/64/hash.h>
extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
pgprot_t flags, unsigned int psz);
+
+static inline unsigned long radix__get_tree_size(void)
+{
+ unsigned long rts_field;
+ /*
+ * we support 52 bits, hence 52-31 = 21, 0b10101
+ * RTS encoding details
+	 * bits 0 - 3 of rts -> bits 6 - 8 of unsigned long
+ * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
+ */
+ rts_field = (0x5UL << 5); /* 6 - 8 bits */
+ rts_field |= (0x2UL << 61);
+
+ return rts_field;
+}
#endif /* __ASSEMBLY__ */
#endif
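A quick way to sanity-check the encoding above: RTS = 52 - 31 = 21 = 0b10101, whose low three bits (0b101 = 0x5) go into the low field and whose top two bits (0b10 = 0x2) go into the high field. A standalone sketch (plain C, illustrative only):

#include <assert.h>

/* Rebuild RTS from the two fields radix__get_tree_size() sets and
 * confirm it encodes a 52-bit (2^(RTS + 31)) radix tree. */
int main(void)
{
	unsigned long rts_field = (0x5UL << 5) | (0x2UL << 61);
	unsigned long lo = (rts_field >> 5) & 0x7;	/* low 3 bits of RTS */
	unsigned long hi = (rts_field >> 61) & 0x3;	/* high 2 bits of RTS */

	assert(((hi << 3) | lo) == 21);	/* 0b10101 */
	assert(21 + 31 == 52);		/* 52-bit address space */
	return 0;
}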
extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid);
+extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
extern void radix__tlb_flush(struct mmu_gather *tlb);
#ifdef CONFIG_SMP
extern void radix__flush_tlb_mm(struct mm_struct *mm);
extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid);
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
#else
#define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm)
#define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr)
#define radix___flush_tlb_page(mm,addr,p,i) radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_pwc(tlb, addr) radix__local_flush_tlb_pwc(tlb, addr)
#endif
#endif
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */
+/*
+ * flush the page walk cache for the address
+ */
+static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+ /*
+	 * Flush the page table walk cache on freeing a page table. We
+	 * have already marked the upper/higher level page table entry
+	 * as none, so it is safe to flush the PWC here.
+ */
+ if (!radix_enabled())
+ return;
+ radix__flush_tlb_pwc(tlb, address);
+}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
#include <linux/mm.h>
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
- unsigned long address)
-{
-
-}
#ifdef CONFIG_PPC64
#include <asm/book3s/64/pgalloc.h>
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+ return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- pte_fragment_fre((unsigned long *)pte, 1);
+ pte_fragment_free((unsigned long *)pte, 1);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
-#define SPRN_MMCR2 769
+#define SPRN_MMCR2 785
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
#define SPRN_PMC6 792
#define SPRN_PMC7 793
#define SPRN_PMC8 794
-#define SPRN_SIAR 780
-#define SPRN_SDAR 781
#define SPRN_SIER 784
#define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
+#define SPRN_SIAR 796
+#define SPRN_SDAR 797
#define SPRN_TACR 888
#define SPRN_TCSCR 889
#define SPRN_CSIGR 890
if (pe->type & EEH_PE_VF) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
} else {
- eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
pci_lock_rescan_remove();
pci_hp_remove_devices(bus);
pci_unlock_rescan_remove();
}
} else if (frozen_bus) {
- eeh_pe_dev_traverse(pe, eeh_rmv_device, &rmv_data);
+ eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
}
/*
*/
edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
- if (pe->type & EEH_PE_VF)
+ if (pe->type & EEH_PE_VF) {
eeh_add_virt_device(edev, NULL);
- else
+ } else {
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
pci_hp_add_devices(bus);
+ }
} else if (frozen_bus && rmv_data->removed) {
pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
ssleep(5);
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
mtlr r10
-BEGIN_MMU_FTR_SECTION
- b 2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+BEGIN_MMU_FTR_SECTION
beq- 2f
+FTR_SECTION_ELSE
+ b 2f
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
.machine push
.machine "power4"
printk(KERN_INFO "PCI: Probing PCI hardware\n");
- pci_io_base = ISA_IO_BASE;
/* For now, override phys_mem_access_prot. If we need it,
* later, we may move that initialization to each ppc_md
*/
current->thread.regs = regs - 1;
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /*
+ * Clear any transactional state, we're exec()ing. The cause is
+	 * not important as there will never be a recheckpoint, so it's not
+ * user visible.
+ */
+ if (MSR_TM_SUSPENDED(mfmsr()))
+ tm_reclaim_current(0);
+#endif
+
memset(regs->gpr, 0, sizeof(regs->gpr));
regs->ctr = 0;
regs->link = 0;
W(0xffff0000), W(0x003e0000), /* POWER6 */
W(0xffff0000), W(0x003f0000), /* POWER7 */
W(0xffff0000), W(0x004b0000), /* POWER8E */
+ W(0xffff0000), W(0x004c0000), /* POWER8NVL */
W(0xffff0000), W(0x004d0000), /* POWER8 */
W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
 * must be matched by the macro below. Update the definition if
* the structure layout changes.
*/
-#define IBM_ARCH_VEC_NRCORES_OFFSET 125
+#define IBM_ARCH_VEC_NRCORES_OFFSET 133
W(NR_CPUS), /* number of cores supported */
0,
0,
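The offset bump above follows directly from the insertion: assuming each supported-PVR entry in the vector is one W(mask) plus one W(value), each a 32-bit word, the new POWER8NVL pair adds 8 bytes ahead of the NR_CPUS field, moving the hand-maintained byte offset from 125 to 133. A trivial check:

#include <assert.h>

int main(void)
{
	unsigned int old_offset = 125;		/* before POWER8NVL */
	unsigned int entry_bytes = 2 * 4;	/* W(mask) + W(value) */

	assert(old_offset + entry_bytes == 133);
	return 0;
}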
#else
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
- offsetof(struct thread_fp_state, fpr[32][0]));
+ offsetof(struct thread_fp_state, fpr[32]));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
return 0;
#else
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
- offsetof(struct thread_fp_state, fpr[32][0]));
+ offsetof(struct thread_fp_state, fpr[32]));
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
std r3, STK_PARAM(R3)(r1)
SAVE_NVGPRS(r1)
- /* We need to setup MSR for VSX register save instructions. Here we
- * also clear the MSR RI since when we do the treclaim, we won't have a
- * valid kernel pointer for a while. We clear RI here as it avoids
- * adding another mtmsr closer to the treclaim. This makes the region
- * maked as non-recoverable wider than it needs to be but it saves on
- * inserting another mtmsrd later.
- */
+ /* We need to setup MSR for VSX register save instructions. */
mfmsr r14
mr r15, r14
ori r15, r15, MSR_FP
- li r16, MSR_RI
+ li r16, 0
ori r16, r16, MSR_EE /* IRQs hard off */
andc r15, r15, r16
oris r15, r15, MSR_VEC@h
1: tdeqi r6, 0
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
- /* The moment we treclaim, ALL of our GPRs will switch
+ /* Clear MSR RI since we are about to change r1, EE is already off. */
+ li r4, 0
+ mtmsrd r4, 1
+
+ /*
+ * BE CAREFUL HERE:
+ * At this point we can't take an SLB miss since we have MSR_RI
+	 * off. Load only to/from the stack/paca, which are in SLB bolted regions,
+ * until we turn MSR RI back on.
+ *
+ * The moment we treclaim, ALL of our GPRs will switch
* to user register state. (FPRs, CCR etc. also!)
* Use an sprg and a tm_scratch in the PACA to shuffle.
*/
/* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
+
+ /* Reset MSR RI so we can take SLB faults again */
+ li r11, MSR_RI
+ mtmsrd r11, 1
+
mfspr r11, SPRN_PPR
HMT_MEDIUM
ld r5, THREAD_TM_DSCR(r3)
ld r6, THREAD_TM_PPR(r3)
- /* Clear the MSR RI since we are about to change R1. EE is already off
- */
- li r4, 0
- mtmsrd r4, 1
-
REST_GPR(0, r7) /* GPR0 */
REST_2GPRS(2, r7) /* GPR2-3 */
REST_GPR(4, r7) /* GPR4 */
ld r6, _CCR(r7)
mtcr r6
- REST_GPR(1, r7) /* GPR1 */
- REST_GPR(5, r7) /* GPR5-7 */
REST_GPR(6, r7)
- ld r7, GPR7(r7)
+
+ /*
+ * Store r1 and r5 on the stack so that we can access them
+ * after we clear MSR RI.
+ */
+
+ REST_GPR(5, r7)
+ std r5, -8(r1)
+ ld r5, GPR1(r7)
+ std r5, -16(r1)
+
+ REST_GPR(7, r7)
+
+ /* Clear MSR RI since we are about to change r1. EE is already off */
+ li r5, 0
+ mtmsrd r5, 1
+
+ /*
+ * BE CAREFUL HERE:
+ * At this point we can't take an SLB miss since we have MSR_RI
+	 * off. Load only to/from the stack/paca, which are in SLB bolted regions,
+ * until we turn MSR RI back on.
+ */
+
+ ld r5, -8(r1)
+ ld r1, -16(r1)
/* Commit register state as checkpointed state: */
TRECHKPT
DBG_LOW(" -> hit\n");
/* Update the HPTE */
hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
- ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N |
+ ~(HPTE_R_PPP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PPP | HPTE_R_N |
HPTE_R_C)));
}
native_unlock_hpte(hptep);
/* Update the HPTE */
hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
- ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N)));
+ ~(HPTE_R_PPP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PPP | HPTE_R_N)));
/*
* Ensure it is out of the tlb too. Bolted entries base and
* actual page size will be same.
}
}
/* This works for all page sizes, and for 256M and 1T segments */
- *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ *ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
+ else
+ *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+
shift = mmu_psize_defs[size].shift;
avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
},
};
+/*
+ * 'R' and 'C' update notes:
+ * - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
+ * create writeable HPTEs without C set, because the hcall H_PROTECT
+ * that we use in that case will not update C
+ * - The above is however not a problem, because we also don't do that
+ * fancy "no flush" variant of eviction and we use H_REMOVE which will
+ * do the right thing and thus we don't have the race I described earlier
+ *
+ * - Under bare metal, we do have the race, so we need R and C set
+ * - We make sure R is always set and never lost
+ * - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
+ */
unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
unsigned long rflags = 0;
rflags |= 0x1;
}
/*
- * Always add "C" bit for perf. Memory coherence is always enabled
+ * We can't allow hardware to update hpte bits. Hence always
+ * set 'R' bit and set 'C' if it is a write fault
*/
- rflags |= HPTE_R_C | HPTE_R_M;
+ rflags |= HPTE_R_R;
+
+ if (pteflags & _PAGE_DIRTY)
+ rflags |= HPTE_R_C;
/*
* Add in WIG bits
*/
if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
rflags |= HPTE_R_I;
- if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT)
+ else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
rflags |= (HPTE_R_I | HPTE_R_G);
- if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
- rflags |= (HPTE_R_I | HPTE_R_W);
+ else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+ rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
+ else
+ /*
+ * Add memory coherence if cache inhibited is not set
+ */
+ rflags |= HPTE_R_M;
return rflags;
}
vmemmap = (struct page *)H_VMEMMAP_BASE;
ioremap_bot = IOREMAP_BASE;
+#ifdef CONFIG_PCI
+ pci_io_base = ISA_IO_BASE;
+#endif
+
/* Initialize the MMU Hash table and create the linear mapping
* of memory. Has to be done before SLB initialization as this is
* currently where the page size encoding is obtained.
cachep = PGT_CACHE(pdshift - pshift);
#endif
- new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
+ new = kmem_cache_zalloc(cachep, GFP_KERNEL);
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
/*
* set the process table entry,
*/
- rts_field = 3ull << PPC_BITLSHIFT(2);
+ rts_field = radix__get_tree_size();
process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
return 0;
}
changed = !pmd_same(*(pmdp), entry);
if (changed) {
__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
- /*
- * Since we are not supporting SW TLB systems, we don't
- * have any thing similar to flush_tlb_page_nohash()
- */
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
return changed;
}
process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
/*
* Fill in the process table.
- * we support 52 bits, hence 52-28 = 24, 11000
*/
- rts_field = 3ull << PPC_BITLSHIFT(2);
+ rts_field = radix__get_tree_size();
process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
/*
 * Fill in the partition table. We are supposed to use the effective address
static void __init radix_init_partition_table(void)
{
unsigned long rts_field;
- /*
- * we support 52 bits, hence 52-28 = 24, 11000
- */
- rts_field = 3ull << PPC_BITLSHIFT(2);
+
+ rts_field = radix__get_tree_size();
BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
void __init radix__early_init_mmu(void)
{
unsigned long lpcr;
- /*
- * setup LPCR UPRT based on mmu_features
- */
- lpcr = mfspr(SPRN_LPCR);
- mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
#ifdef CONFIG_PPC_64K_PAGES
/* PAGE_SIZE mappings */
__vmalloc_end = RADIX_VMALLOC_END;
vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
ioremap_bot = IOREMAP_BASE;
+
+#ifdef CONFIG_PCI
+ pci_io_base = ISA_IO_BASE;
+#endif
+
/*
* For now radix also use the same frag size
*/
__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
radix_init_page_sizes();
- if (!firmware_has_feature(FW_FEATURE_LPAR))
+ if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+ lpcr = mfspr(SPRN_LPCR);
+ mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
radix_init_partition_table();
+ }
radix_init_pgtable();
}
{
unsigned long lpcr;
/*
- * setup LPCR UPRT based on mmu_features
- */
- lpcr = mfspr(SPRN_LPCR);
- mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
- /*
- * update partition table control register, 64 K size.
+ * update partition table control register and UPRT
*/
- if (!firmware_has_feature(FW_FEATURE_LPAR))
+ if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+ lpcr = mfspr(SPRN_LPCR);
+ mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
+
mtspr(SPRN_PTCR,
__pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
+ }
}
void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
pte_t *pte;
if (slab_is_available()) {
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
} else {
pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
if (pte)
{
struct page *ptepage;
- gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+ gfp_t flags = GFP_KERNEL | __GFP_ZERO;
ptepage = alloc_pages(flags, 0);
if (!ptepage)
static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
void *ret = NULL;
- struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
- __GFP_REPEAT | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
if (!page)
return NULL;
if (!kernel && !pgtable_page_ctor(page)) {
static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
-static inline void __tlbiel_pid(unsigned long pid, int set)
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
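For reference, the RIC (Radix Invalidation Control) field selects what a tlbie/tlbiel invalidates; an annotated mirror of the constants above, with the semantics as I understand them from the ISA 3.0 definition:

/* What each tlbie[l] RIC value invalidates in radix mode. */
enum radix_invalidation_control {
	RIC_FLUSH_TLB = 0,	/* TLB entries only */
	RIC_FLUSH_PWC = 1,	/* page-walk cache only */
	RIC_FLUSH_ALL = 2,	/* TLB entries plus page-walk cache */
};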
+static inline void __tlbiel_pid(unsigned long pid, int set,
+ unsigned long ric)
{
- unsigned long rb,rs,ric,prs,r;
+ unsigned long rb,rs,prs,r;
rb = PPC_BIT(53); /* IS = 1 */
rb |= set << PPC_BITLSHIFT(51);
rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* radix format */
- ric = 2; /* invalidate all the caches */
asm volatile("ptesync": : :"memory");
asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
*/
-static inline void _tlbiel_pid(unsigned long pid)
+static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
int set;
for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
- __tlbiel_pid(pid, set);
+ __tlbiel_pid(pid, set, ric);
}
return;
}
-static inline void _tlbie_pid(unsigned long pid)
+static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
- unsigned long rb,rs,ric,prs,r;
+ unsigned long rb,rs,prs,r;
rb = PPC_BIT(53); /* IS = 1 */
rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* radix format */
- ric = 2; /* invalidate all the caches */
asm volatile("ptesync": : :"memory");
asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
}
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
- unsigned long ap)
+ unsigned long ap, unsigned long ric)
{
- unsigned long rb,rs,ric,prs,r;
+ unsigned long rb,rs,prs,r;
rb = va & ~(PPC_BITMASK(52, 63));
rb |= ap << PPC_BITLSHIFT(58);
rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* radix format */
- ric = 0; /* no cluster flush yet */
asm volatile("ptesync": : :"memory");
asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
}
static inline void _tlbie_va(unsigned long va, unsigned long pid,
- unsigned long ap)
+ unsigned long ap, unsigned long ric)
{
- unsigned long rb,rs,ric,prs,r;
+ unsigned long rb,rs,prs,r;
rb = va & ~(PPC_BITMASK(52, 63));
rb |= ap << PPC_BITLSHIFT(58);
rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* radix format */
- ric = 0; /* no cluster flush yet */
asm volatile("ptesync": : :"memory");
asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
*/
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
- unsigned int pid;
+ unsigned long pid;
preempt_disable();
pid = mm->context.id;
if (pid != MMU_NO_CONTEXT)
- _tlbiel_pid(pid);
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);
+void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+ unsigned long pid;
+ struct mm_struct *mm = tlb->mm;
+
+ preempt_disable();
+
+ pid = mm->context.id;
+ if (pid != MMU_NO_CONTEXT)
+ _tlbiel_pid(pid, RIC_FLUSH_PWC);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
+
void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid)
{
- unsigned int pid;
+ unsigned long pid;
preempt_disable();
pid = mm ? mm->context.id : 0;
if (pid != MMU_NO_CONTEXT)
- _tlbiel_va(vmaddr, pid, ap);
+ _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
preempt_enable();
}
void radix__flush_tlb_mm(struct mm_struct *mm)
{
- unsigned int pid;
+ unsigned long pid;
preempt_disable();
pid = mm->context.id;
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
- _tlbie_pid(pid);
+ _tlbie_pid(pid, RIC_FLUSH_ALL);
if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock);
} else
- _tlbiel_pid(pid);
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);
+void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+ unsigned long pid;
+ struct mm_struct *mm = tlb->mm;
+
+ preempt_disable();
+
+ pid = mm->context.id;
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto no_context;
+
+ if (!mm_is_core_local(mm)) {
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+ if (lock_tlbie)
+ raw_spin_lock(&native_tlbie_lock);
+ _tlbie_pid(pid, RIC_FLUSH_PWC);
+ if (lock_tlbie)
+ raw_spin_unlock(&native_tlbie_lock);
+ } else
+ _tlbiel_pid(pid, RIC_FLUSH_PWC);
+no_context:
+ preempt_enable();
+}
+EXPORT_SYMBOL(radix__flush_tlb_pwc);
+
void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid)
{
- unsigned int pid;
+ unsigned long pid;
preempt_disable();
pid = mm ? mm->context.id : 0;
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
- _tlbie_va(vmaddr, pid, ap);
+ _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock);
} else
- _tlbiel_va(vmaddr, pid, ap);
+ _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
preempt_enable();
}
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
- _tlbie_pid(0);
+ _tlbie_pid(0, RIC_FLUSH_ALL);
if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock);
}
/* convenience wrappers around the common clk API */
static inline struct clk *mpc512x_clk_fixed(const char *name, int rate)
{
- return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+ return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
static inline struct clk *mpc512x_clk_factor(
if (rc < 0)
goto out;
- skip = roundup(cprm->file->f_pos - total + sz, 4) - cprm->file->f_pos;
+ skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
if (!dump_skip(cprm, skip))
goto Eio;
out:
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
-static int ibm_configure_bridge;
static int ibm_configure_pe;
/*
ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
ibm_configure_pe = rtas_token("ibm,configure-pe");
- ibm_configure_bridge = rtas_token("ibm,configure-bridge");
+
+ /*
+	 * ibm,configure-pe and ibm,configure-bridge have the same semantics;
+	 * however, ibm,configure-pe can be faster. If we can't find
+	 * ibm,configure-pe, fall back to using ibm,configure-bridge.
+ */
+ if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
+ ibm_configure_pe = rtas_token("ibm,configure-bridge");
/*
* Necessary sanity check. We needn't check "get-config-addr-info"
(ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
- (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
- ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) {
+ ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
pr_info("EEH functionality not supported\n");
return -EINVAL;
}
{
int config_addr;
int ret;
+	/* Wait 0.2s maximum before skipping configuration */
+ int max_wait = 200;
/* Figure out the PE address */
config_addr = pe->config_addr;
if (pe->addr)
config_addr = pe->addr;
- /* Use new configure-pe function, if supported */
- if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+ while (max_wait > 0) {
ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
config_addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid));
- } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
- ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
- config_addr, BUID_HI(pe->phb->buid),
- BUID_LO(pe->phb->buid));
- } else {
- return -EFAULT;
- }
- if (ret)
- pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
- __func__, pe->phb->global_number, pe->addr, ret);
+ if (!ret)
+ return ret;
+
+ /*
+ * If RTAS returns a delay value that's above 100ms, cut it
+ * down to 100ms in case firmware made a mistake. For more
+		 * on how these delay values work, see rtas_busy_delay_time().
+ */
+ if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
+ ret <= RTAS_EXTENDED_DELAY_MAX)
+ ret = RTAS_EXTENDED_DELAY_MIN+2;
+
+ max_wait -= rtas_busy_delay_time(ret);
+
+ if (max_wait < 0)
+ break;
+
+ rtas_busy_delay(ret);
+ }
+ pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+ __func__, pe->phb->global_number, pe->addr, ret);
return ret;
}
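The clamp to RTAS_EXTENDED_DELAY_MIN+2 works because extended delay codes scale by powers of ten: each step above the minimum multiplies the suggested wait by 10, so MIN+2 corresponds to 100ms. A standalone sketch of that conversion, modeled on rtas_busy_delay_time() (the 9900 base value is an assumption of this sketch):

/* Convert an RTAS extended-delay status into milliseconds:
 * 9900 -> 1ms, 9901 -> 10ms, 9902 -> 100ms, ..., 9905 -> 100s. */
static unsigned int extended_delay_ms(int status)
{
	unsigned int ms = 1;
	int order = status - 9900;	/* RTAS_EXTENDED_DELAY_MIN */

	while (order-- > 0)
		ms *= 10;
	return ms;
}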
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
- cfg_addr = (pdn->busno << 8) | pdn->devfn;
+ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
cfg_addr, BUID_HI(buid), BUID_LO(buid));
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
- cfg_addr = (pdn->busno << 8) | pdn->devfn;
+ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
+CONFIG_ZPOOL=m
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PCI=y
CONFIG_PCI_DEBUG=y
CONFIG_HOTPLUG_PCI=y
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
CONFIG_FRAME_WARN=1024
CONFIG_READABLE_ASM=y
CONFIG_UNUSED_SYMBOLS=y
CONFIG_SLUB_STATS=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_VMACACHE=y
CONFIG_DEBUG_VM_RB=y
+CONFIG_DEBUG_VM_PGFLAGS=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
+CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_ON_OOPS=y
+CONFIG_DEBUG_TIMEKEEPING=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_TEST_KSTRTOX=y
CONFIG_DMA_API_DEBUG=y
CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z196=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PCI=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_DLM=m
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
CONFIG_BLK_DEV_IO_TRACE=y
# CONFIG_KPROBE_EVENT is not set
+CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_RBTREE_TEST=m
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PCI=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DLM=m
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
# CONFIG_SWAP is not set
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z196=y
CONFIG_TUNE_ZEC12=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_FTRACE is not set
-# CONFIG_STRICT_DEVMEM is not set
# CONFIG_PFAULT is not set
# CONFIG_S390_HYPFS_FS is not set
# CONFIG_VIRTUALIZATION is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
+CONFIG_USELIB=y
CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_IBM_PARTITION=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
-# CONFIG_INET_LRO is not set
CONFIG_L2TP=m
CONFIG_L2TP_DEBUGFS=m
CONFIG_VLAN_8021Q=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
CONFIG_UNUSED_SYMBOLS=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_RCU_TRACE=y
CONFIG_LATENCYTOP=y
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
-CONFIG_TRACER_SNAPSHOT=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y
-# CONFIG_STRICT_DEVMEM is not set
CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_ZLIB=m
-CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ANSI_CPRNG=m
" la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
- : "=d" (rc), "=d" (orig_fpc)
+ : "=d" (rc), "=&d" (orig_fpc)
: "d" (fpc), "0" (-EINVAL));
return rc;
}
u32 exit_stop_request;
u32 exit_validity;
u32 exit_instruction;
+ u32 exit_pei;
u32 halt_successful_poll;
u32 halt_attempted_poll;
u32 halt_poll_invalid;
S390_lowcore.program_new_psw.addr =
(unsigned long) s390_base_pgm_handler;
- /*
- * Clear subchannel ID and number to signal new kernel that no CCW or
- * SCSI IPL has been done (for kexec and kdump)
- */
- S390_lowcore.subchannel_id = 0;
- S390_lowcore.subchannel_nr = 0;
-
do_reset_calls();
}
/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
+ .task_ctx_nr = perf_sw_context,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
.pmu_enable = cpumf_pmu_enable,
.pmu_disable = cpumf_pmu_disable,
.event_init = cpumf_pmu_event_init,
goto out;
}
- /* The CPU measurement counter facility does not have overflow
- * interrupts to do sampling. Sampling must be provided by
- * external means, for example, by timers.
- */
- cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
cpumf_pmu.attr_groups = cpumf_cf_event_group();
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
if (rc) {
static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
+ vcpu->stat.exit_pei++;
+
if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */
return handle_mvpg_pei(vcpu);
if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */
{ "exit_external_request", VCPU_STAT(exit_external_request) },
{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
{ "exit_instruction", VCPU_STAT(exit_instruction) },
+ { "exit_pei", VCPU_STAT(exit_pei) },
{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
kvm->arch.model.cpuid = proc->cpuid;
lowest_ibc = sclp.ibc >> 16 & 0xfff;
unblocked_ibc = sclp.ibc & 0xfff;
- if (lowest_ibc) {
+ if (lowest_ibc && proc->ibc) {
if (proc->ibc > unblocked_ibc)
kvm->arch.model.ibc = unblocked_ibc;
else if (proc->ibc < lowest_ibc)
report_user_fault(regs, SIGSEGV, 1);
si.si_signo = SIGSEGV;
+ si.si_errno = 0;
si.si_code = si_code;
si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
force_sig_info(SIGSEGV, &si, current);
return table;
}
/* Allocate a fresh page */
- page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+ page = alloc_page(GFP_KERNEL);
if (!page)
return NULL;
if (!pgtable_page_ctor(page)) {
pgste = pgste_get_lock(ptep);
pgstev = pgste_val(pgste);
pte = *ptep;
- if (pte_swap(pte) &&
+ if (!reset && pte_swap(pte) &&
((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
(pgstev & _PGSTE_GPS_ZERO))) {
ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
* | | |
* +---------------+ |
* | 8 byte skbp | |
- * R15+170 -> +---------------+ |
+ * R15+176 -> +---------------+ |
* | 8 byte hlen | |
* R15+168 -> +---------------+ |
* | 4 byte align | |
#define STK_OFF (STK_SPACE - STK_160_UNUSED)
#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
-#define STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */
+#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */
#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */
#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */
int labels[1]; /* Labels for local jumps */
};
-#define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */
+#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */
#define SEEN_SKB 1 /* skb access */
#define SEEN_MEM 2 /* use mem[] for temporary storage */
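The new limit tracks branch reach rather than displacement reach: assuming the "(16 bit branches)" comment refers to relative branches whose signed 16-bit offset is counted in halfwords, the farthest encodable target is 32767 halfwords (65534 bytes) away, so a 0xffff-byte program keeps every intra-program branch in range. A rough check:

#include <assert.h>

int main(void)
{
	long reach = 32767L * 2;	/* max forward branch, bytes */
	long max_distance = 0xffff - 1;	/* first byte to last byte */

	assert(max_distance <= reach);
	return 0;
}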
emit_load_skb_data_hlen(jit);
if (jit->seen & SEEN_SKB_CHANGE)
/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
- EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
+ EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
STK_OFF_SKBP);
}
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
- PTE_ORDER);
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
return pte;
}
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+ pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
if (!pte)
return NULL;
clear_highpage(pte);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
struct page *page;
void *pg;
- pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
if (!pg)
return NULL;
page = virt_to_page(pg);
#include <linux/mm.h>
#include <linux/slab.h>
-#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO
static struct kmem_cache *pgd_cachep;
#if PAGETABLE_LEVELS > 2
#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
+#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
+#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
+#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+
#define __CHEETAH_ID 0x003e0014
#define __JALAPENO_ID 0x003e0016
#define __SERRANO_ID 0x003e0022
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(pgtable_cache,
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(pgtable_cache,
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
restored; \
nop; nop; nop; nop; nop; nop; \
nop; nop; nop; nop; nop; \
- ba,a,pt %xcc, user_rtt_fill_fixup; \
- ba,a,pt %xcc, user_rtt_fill_fixup; \
+ ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
+ ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
ba,a,pt %xcc, user_rtt_fill_fixup;
restored; \
nop; nop; nop; nop; nop; \
nop; nop; nop; \
- ba,a,pt %xcc, user_rtt_fill_fixup; \
- ba,a,pt %xcc, user_rtt_fill_fixup; \
+ ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
+ ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
ba,a,pt %xcc, user_rtt_fill_fixup;
CFLAGS_REMOVE_pcr.o := -pg
endif
+obj-$(CONFIG_SPARC64) += urtt_fill.o
obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
obj-$(CONFIG_SPARC32) += etrap_32.o
obj-$(CONFIG_SPARC32) += rtrap_32.o
#include <asm/visasm.h>
#include <asm/processor.h>
-#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
-#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
-#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
-
#ifdef CONFIG_CONTEXT_TRACKING
# define SCHEDULE_USER schedule_user
#else
wrpr %g1, %cwp
ba,a,pt %xcc, user_rtt_fill_64bit
-user_rtt_fill_fixup:
- rdpr %cwp, %g1
- add %g1, 1, %g1
- wrpr %g1, 0x0, %cwp
-
- rdpr %wstate, %g2
- sll %g2, 3, %g2
- wrpr %g2, 0x0, %wstate
-
- /* We know %canrestore and %otherwin are both zero. */
-
- sethi %hi(sparc64_kern_pri_context), %g2
- ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
- mov PRIMARY_CONTEXT, %g1
-
-661: stxa %g2, [%g1] ASI_DMMU
- .section .sun4v_1insn_patch, "ax"
- .word 661b
- stxa %g2, [%g1] ASI_MMU
- .previous
-
- sethi %hi(KERNBASE), %g1
- flush %g1
+user_rtt_fill_fixup_dax:
+ ba,pt %xcc, user_rtt_fill_fixup_common
+ mov 1, %g3
- or %g4, FAULT_CODE_WINFIXUP, %g4
- stb %g4, [%g6 + TI_FAULT_CODE]
- stx %g5, [%g6 + TI_FAULT_ADDR]
+user_rtt_fill_fixup_mna:
+ ba,pt %xcc, user_rtt_fill_fixup_common
+ mov 2, %g3
- mov %g6, %l1
- wrpr %g0, 0x0, %tl
-
-661: nop
- .section .sun4v_1insn_patch, "ax"
- .word 661b
- SET_GL(0)
- .previous
-
- wrpr %g0, RTRAP_PSTATE, %pstate
-
- mov %l1, %g6
- ldx [%g6 + TI_TASK], %g4
- LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
- call do_sparc64_fault
- add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+user_rtt_fill_fixup:
+ ba,pt %xcc, user_rtt_fill_fixup_common
+ clr %g3
user_rtt_pre_restore:
add %g1, 1, %g1
return 0;
}
+/* Checks if the fp is valid. We always build signal frames which are
+ * 16-byte aligned; therefore, we can always enforce that the restore
+ * frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+ if ((((unsigned long) fp) & 15) ||
+ ((unsigned long)fp) > 0x100000000ULL - fplen)
+ return true;
+ return false;
+}
+
void do_sigreturn32(struct pt_regs *regs)
{
struct signal_frame32 __user *sf;
compat_uptr_t fpu_save;
compat_uptr_t rwin_save;
- unsigned int psr;
+ unsigned int psr, ufp;
unsigned int pc, npc;
sigset_t set;
compat_sigset_t seta;
sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
- (((unsigned long) sf) & 3))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
+ goto segv;
+
+ if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+ goto segv;
+
+ if (ufp & 0x7)
goto segv;
- if (get_user(pc, &sf->info.si_regs.pc) ||
+ if (__get_user(pc, &sf->info.si_regs.pc) ||
__get_user(npc, &sf->info.si_regs.npc))
goto segv;
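A note on the access pattern above: the first read goes through get_user(), which performs the access_ok() range check; once the frame pointer is known to be valid, the remaining fields of the same frame may use __get_user(), which skips the redundant check. A condensed sketch of that flow (hypothetical helper, not part of the patch):

/* Hypothetical condensation of the checks performed above. */
static int read_sigframe32_regs(struct signal_frame32 __user *sf,
				unsigned int *pc, unsigned int *npc)
{
	unsigned int ufp;

	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
		return -EFAULT;		/* checked access (access_ok) */
	if (ufp & 0x7)
		return -EFAULT;		/* saved fp must be 8-byte aligned */
	if (__get_user(*pc, &sf->info.si_regs.pc) ||
	    __get_user(*npc, &sf->info.si_regs.npc))
		return -EFAULT;		/* unchecked; range already validated */
	return 0;
}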
asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
{
struct rt_signal_frame32 __user *sf;
- unsigned int psr, pc, npc;
+ unsigned int psr, pc, npc, ufp;
compat_uptr_t fpu_save;
compat_uptr_t rwin_save;
sigset_t set;
sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
- (((unsigned long) sf) & 3))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv;
- if (get_user(pc, &sf->regs.pc) ||
+ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+ goto segv;
+
+ if (ufp & 0x7)
+ goto segv;
+
+ if (__get_user(pc, &sf->regs.pc) ||
__get_user(npc, &sf->regs.npc))
goto segv;
force_sig(SIGSEGV, current);
}
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp, int fplen)
-{
- if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
- return 1;
- return 0;
-}
-
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp;
#define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
+/* Checks if the fp is valid. We always build signal frames which are
+ * 16-byte aligned; therefore, we can always enforce that the restore
+ * frame has that property as well.
+ */
+static inline bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+ if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
+ return true;
+
+ return false;
+}
+
asmlinkage void do_sigreturn(struct pt_regs *regs)
{
+ unsigned long up_psr, pc, npc, ufp;
struct signal_frame __user *sf;
- unsigned long up_psr, pc, npc;
sigset_t set;
__siginfo_fpu_t __user *fpu_save;
__siginfo_rwin_t __user *rwin_save;
sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
+ goto segv_and_exit;
+
+ if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
goto segv_and_exit;
- if (((unsigned long) sf) & 3)
+ if (ufp & 0x7)
goto segv_and_exit;
err = __get_user(pc, &sf->info.si_regs.pc);
asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
{
struct rt_signal_frame __user *sf;
- unsigned int psr, pc, npc;
+ unsigned int psr, pc, npc, ufp;
__siginfo_fpu_t __user *fpu_save;
__siginfo_rwin_t __user *rwin_save;
sigset_t set;
synchronize_user_stack();
sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
- (((unsigned long) sf) & 0x03))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
+ goto segv;
+
+ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+ goto segv;
+
+ if (ufp & 0x7)
goto segv;
err = __get_user(pc, &sf->regs.pc);
force_sig(SIGSEGV, current);
}
-/* Checks if the fp is valid */
-static inline int invalid_frame_pointer(void __user *fp, int fplen)
-{
- if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
- return 1;
-
- return 0;
-}
-
static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp = regs->u_regs[UREG_FP];
goto out;
}
+/* Checks if the fp is valid. We always build rt signal frames which
+ * are 16-byte aligned; therefore, we can always enforce that the
+ * restore frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp)
+{
+ if (((unsigned long) fp) & 15)
+ return true;
+ return false;
+}
+
struct rt_signal_frame {
struct sparc_stackf ss;
siginfo_t info;
void do_rt_sigreturn(struct pt_regs *regs)
{
+ unsigned long tpc, tnpc, tstate, ufp;
struct rt_signal_frame __user *sf;
- unsigned long tpc, tnpc, tstate;
__siginfo_fpu_t __user *fpu_save;
__siginfo_rwin_t __user *rwin_save;
sigset_t set;
(regs->u_regs [UREG_FP] + STACK_BIAS);
/* 1. Make sure we are not getting garbage from the user */
- if (((unsigned long) sf) & 3)
+ if (invalid_frame_pointer(sf))
+ goto segv;
+
+ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
goto segv;
- err = get_user(tpc, &sf->regs.tpc);
+ if ((ufp + STACK_BIAS) & 0x7)
+ goto segv;
+
+ err = __get_user(tpc, &sf->regs.tpc);
err |= __get_user(tnpc, &sf->regs.tnpc);
if (test_thread_flag(TIF_32BIT)) {
tpc &= 0xffffffff;
force_sig(SIGSEGV, current);
}
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp)
-{
- if (((unsigned long) fp) & 15)
- return 1;
- return 0;
-}
-
static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
int err;
+
+ if (((unsigned long) fpu) & 3)
+ return -EFAULT;
+
#ifdef CONFIG_SMP
if (test_tsk_thread_flag(current, TIF_USEDFPU))
regs->psr &= ~PSR_EF;
struct thread_info *t = current_thread_info();
int i, wsaved, err;
- __get_user(wsaved, &rp->wsaved);
+ if (((unsigned long) rp) & 3)
+ return -EFAULT;
+
+ get_user(wsaved, &rp->wsaved);
if (wsaved > NSWINS)
return -EFAULT;
unsigned long fprs;
int err;
- err = __get_user(fprs, &fpu->si_fprs);
+ if (((unsigned long) fpu) & 7)
+ return -EFAULT;
+
+ err = get_user(fprs, &fpu->si_fprs);
fprs_write(0);
regs->tstate &= ~TSTATE_PEF;
if (fprs & FPRS_DL)
struct thread_info *t = current_thread_info();
int i, wsaved, err;
- __get_user(wsaved, &rp->wsaved);
+ if (((unsigned long) rp) & 7)
+ return -EFAULT;
+
+ get_user(wsaved, &rp->wsaved);
if (wsaved > NSWINS)
return -EFAULT;
--- /dev/null
+#include <asm/thread_info.h>
+#include <asm/trap_block.h>
+#include <asm/spitfire.h>
+#include <asm/ptrace.h>
+#include <asm/head.h>
+
+ .text
+ .align 8
+ .globl user_rtt_fill_fixup_common
+user_rtt_fill_fixup_common:
+ rdpr %cwp, %g1
+ add %g1, 1, %g1
+ wrpr %g1, 0x0, %cwp
+
+ rdpr %wstate, %g2
+ sll %g2, 3, %g2
+ wrpr %g2, 0x0, %wstate
+
+ /* We know %canrestore and %otherwin are both zero. */
+
+ sethi %hi(sparc64_kern_pri_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
+ mov PRIMARY_CONTEXT, %g1
+
+661: stxa %g2, [%g1] ASI_DMMU
+ .section .sun4v_1insn_patch, "ax"
+ .word 661b
+ stxa %g2, [%g1] ASI_MMU
+ .previous
+
+ sethi %hi(KERNBASE), %g1
+ flush %g1
+
+ mov %g4, %l4
+ mov %g5, %l5
+ brnz,pn %g3, 1f
+ mov %g3, %l3
+
+ or %g4, FAULT_CODE_WINFIXUP, %g4
+ stb %g4, [%g6 + TI_FAULT_CODE]
+ stx %g5, [%g6 + TI_FAULT_ADDR]
+1:
+ mov %g6, %l1
+ wrpr %g0, 0x0, %tl
+
+661: nop
+ .section .sun4v_1insn_patch, "ax"
+ .word 661b
+ SET_GL(0)
+ .previous
+
+ wrpr %g0, RTRAP_PSTATE, %pstate
+
+ mov %l1, %g6
+ ldx [%g6 + TI_TASK], %g4
+ LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
+
+ brnz,pn %l3, 1f
+ nop
+
+ call do_sparc64_fault
+ add %sp, PTREGS_OFF, %o0
+ ba,pt %xcc, rtrap
+ nop
+
+1: cmp %g3, 2
+ bne,pn %xcc, 2f
+ nop
+
+ sethi %hi(tlb_type), %g1
+ lduw [%g1 + %lo(tlb_type)], %g1
+ cmp %g1, 3
+ bne,pt %icc, 1f
+ add %sp, PTREGS_OFF, %o0
+ mov %l4, %o2
+ call sun4v_do_mna
+ mov %l5, %o1
+ ba,a,pt %xcc, rtrap
+1: mov %l4, %o1
+ mov %l5, %o2
+ call mem_address_unaligned
+ nop
+ ba,a,pt %xcc, rtrap
+
+2: sethi %hi(tlb_type), %g1
+ mov %l4, %o1
+ lduw [%g1 + %lo(tlb_type)], %g1
+ mov %l5, %o2
+ cmp %g1, 3
+ bne,pt %icc, 1f
+ add %sp, PTREGS_OFF, %o0
+ call sun4v_data_access_exception
+ nop
+ ba,a,pt %xcc, rtrap
+
+1: call spitfire_data_access_exception
+ nop
+ ba,a,pt %xcc, rtrap
pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
- __GFP_REPEAT | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
pte_t *pte = NULL;
if (page)
pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
- __GFP_REPEAT | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
if (!page)
return NULL;
if (!pgtable_page_ctor(page)) {
* the Data-TLB for huge pages.
*/
if (tlb_type == cheetah_plus) {
+ bool need_context_reload = false;
unsigned long ctx;
- spin_lock(&ctx_alloc_lock);
+ spin_lock_irq(&ctx_alloc_lock);
ctx = mm->context.sparc64_ctx_val;
ctx &= ~CTX_PGSZ_MASK;
ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
* also executing in this address space.
*/
mm->context.sparc64_ctx_val = ctx;
- on_each_cpu(context_reload, mm, 0);
+ need_context_reload = true;
}
- spin_unlock(&ctx_alloc_lock);
+ spin_unlock_irq(&ctx_alloc_lock);
+
+ if (need_context_reload)
+ on_each_cpu(context_reload, mm, 0);
}
}
#endif
#ifndef __ASSEMBLY__
-void arch_release_thread_info(struct thread_info *info);
+void arch_release_thread_stack(unsigned long *stack);
/* How to get the thread information struct from C. */
register unsigned long stack_pointer __asm__("sp");
/*
* Release a thread_info structure
*/
-void arch_release_thread_info(struct thread_info *info)
+void arch_release_thread_stack(unsigned long *stack)
{
+ struct thread_info *info = (void *)stack;
struct single_step_state *step_state = info->step_state;
if (step_state) {
struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
int order)
{
- gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
+ gfp_t flags = GFP_KERNEL|__GFP_ZERO;
struct page *p;
int i;
{
pte_t *pte;
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
}
{
struct page *pte;
- pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!pte)
return NULL;
if (!pgtable_page_ctor(pte)) {
#define pgd_alloc(mm) get_pgd_slow(mm)
#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
/*
* Allocate one PTE table.
source "drivers/pci/Kconfig"
+config ISA_BUS
+ bool "ISA-style bus support on modern systems" if EXPERT
+ select ISA_BUS_API
+ help
+ Enables ISA-style drivers on modern systems. This is necessary to
+ support PC/104 devices on X86_64 platforms.
+
+ If unsure, say N.
+
# x86_64 have no ISA slots, but can have ISA-style DMA.
config ISA_DMA_API
bool "ISA-style DMA support" if (X86_64 && EXPERT)
for i in lib lib64 share end ; do \
if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+ if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
+ cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
+ fi ; \
break ; \
fi ; \
if [ $$i = end ] ; then exit 1 ; fi ; \
msr_fail:
pr_cont("Broken PMU hardware detected, using software events only.\n");
- pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+ printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
reg, val_new);
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
struct stack_frame frame;
- const void __user *fp;
+ const unsigned long __user *fp;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
return;
- fp = (void __user *)regs->bp;
+ fp = (unsigned long __user *)regs->bp;
perf_callchain_store(entry, regs->ip);
pagefault_disable();
while (entry->nr < entry->max_stack) {
unsigned long bytes;
+
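+		/* Each saved frame is two user words: the next frame pointer followed by the return address. */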
frame.next_frame = NULL;
frame.return_address = 0;
- if (!access_ok(VERIFY_READ, fp, 16))
+ if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
break;
- bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
+ bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
if (bytes != 0)
break;
- bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
+ bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
if (bytes != 0)
break;
obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o cqm.o
obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o
obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
-obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl.o
-intel-rapl-objs := rapl.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o
+intel-rapl-perf-objs := rapl.o
obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+ /*
+ * When HT is off these events can only run on the bottom 4 counters
+ * When HT is on, they are impacted by the HT bug and require EXCL access
+ */
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
+ /*
+ * When HT is off these events can only run on the bottom 4 counters
+ * When HT is on, they are impacted by the HT bug and require EXCL access
+ */
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
+
+ /*
+ * when HT is off, these can only run on the bottom 4 counters
+ */
+ INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
+
EVENT_CONSTRAINT_END
};
/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
+ /*
+ * When HT is off these events can only run on the bottom 4 counters
+ * When HT is on, they are impacted by the HT bug and require EXCL access
+ */
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
+ /*
+ * when HT is off, these can only run on the bottom 4 counters
+ */
+ INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
EVENT_CONSTRAINT_END
};
int i;
for (i = 0; i < rapl_pmus->maxpkg; i++)
- kfree(rapl_pmus->pmus + i);
+ kfree(rapl_pmus->pmus[i]);
kfree(rapl_pmus);
}
.format_group = &hswep_uncore_cbox_format_group,
};
-static struct intel_uncore_type bdx_uncore_sbox = {
- .name = "sbox",
- .num_counters = 4,
- .num_boxes = 4,
- .perf_ctr_bits = 48,
- .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
- .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
- .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
- .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
- .msr_offset = HSWEP_SBOX_MSR_OFFSET,
- .ops = &hswep_uncore_sbox_msr_ops,
- .format_group = &hswep_uncore_sbox_format_group,
-};
-
-#define BDX_MSR_UNCORE_SBOX 3
-
static struct intel_uncore_type *bdx_msr_uncores[] = {
&bdx_uncore_ubox,
&bdx_uncore_cbox,
&hswep_uncore_pcu,
- &bdx_uncore_sbox,
NULL,
};
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
-
- /* BDX-DE doesn't have SBOX */
- if (boot_cpu_data.x86_model == 86)
- uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
}
static struct intel_uncore_type bdx_uncore_ha = {
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
-#define X86_BUG_NULL_SEG X86_BUG(9) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE X86_BUG(10) /* SWAPGS without input dep on GS */
-
-
#ifdef CONFIG_X86_32
/*
* 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
*/
#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
#endif
+#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
#endif /* _ASM_X86_CPUFEATURES_H */
--- /dev/null
+#ifndef _ASM_X86_INTEL_FAMILY_H
+#define _ASM_X86_INTEL_FAMILY_H
+
+/*
+ * "Big Core" Processors (Branded as Core, Xeon, etc...)
+ *
+ * The "_X" parts are generally the EP and EX Xeons, or the
+ * "Extreme" ones, like Broadwell-E.
+ *
+ * Things ending in "2" are usually because we have no better
+ * name for them. There's no processor called "WESTMERE2".
+ */
+
+#define INTEL_FAM6_CORE_YONAH 0x0E
+#define INTEL_FAM6_CORE2_MEROM 0x0F
+#define INTEL_FAM6_CORE2_MEROM_L 0x16
+#define INTEL_FAM6_CORE2_PENRYN 0x17
+#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D
+
+#define INTEL_FAM6_NEHALEM 0x1E
+#define INTEL_FAM6_NEHALEM_EP 0x1A
+#define INTEL_FAM6_NEHALEM_EX 0x2E
+#define INTEL_FAM6_WESTMERE 0x25
+#define INTEL_FAM6_WESTMERE2 0x1F
+#define INTEL_FAM6_WESTMERE_EP 0x2C
+#define INTEL_FAM6_WESTMERE_EX 0x2F
+
+#define INTEL_FAM6_SANDYBRIDGE 0x2A
+#define INTEL_FAM6_SANDYBRIDGE_X 0x2D
+#define INTEL_FAM6_IVYBRIDGE 0x3A
+#define INTEL_FAM6_IVYBRIDGE_X 0x3E
+
+#define INTEL_FAM6_HASWELL_CORE 0x3C
+#define INTEL_FAM6_HASWELL_X 0x3F
+#define INTEL_FAM6_HASWELL_ULT 0x45
+#define INTEL_FAM6_HASWELL_GT3E 0x46
+
+#define INTEL_FAM6_BROADWELL_CORE 0x3D
+#define INTEL_FAM6_BROADWELL_XEON_D 0x56
+#define INTEL_FAM6_BROADWELL_GT3E 0x47
+#define INTEL_FAM6_BROADWELL_X 0x4F
+
+#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E
+#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E
+#define INTEL_FAM6_SKYLAKE_X 0x55
+#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E
+#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E
+
+/* "Small Core" Processors (Atom) */
+
+#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
+#define INTEL_FAM6_ATOM_LINCROFT 0x26
+#define INTEL_FAM6_ATOM_PENWELL 0x27
+#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
+#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
+#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avoton/Rangeley */
+#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
+#define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */
+#define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Anniedale */
+#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
+#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
+
+/* Xeon Phi */
+
+#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
+
+#endif /* _ASM_X86_INTEL_FAMILY_H */
#define RELATIVECALL_OPCODE 0xe8
#define RELATIVE_ADDR_SIZE 4
#define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR) \
- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
- THREAD_SIZE - (unsigned long)(ADDR))) \
- ? (MAX_STACK_SIZE) \
- : (((unsigned long)current_thread_info()) + \
- THREAD_SIZE - (unsigned long)(ADDR)))
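+/* CUR_STACK_SIZE: bytes between ADDR and the top of the current stack. */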
+#define CUR_STACK_SIZE(ADDR) \
+ (current_top_of_stack() - (unsigned long)(ADDR))
+#define MIN_STACK_SIZE(ADDR) \
+ (MAX_STACK_SIZE < CUR_STACK_SIZE(ADDR) ? \
+ MAX_STACK_SIZE : CUR_STACK_SIZE(ADDR))
#define flush_insn_slot(p) do { } while (0)
#include <linux/irqbypass.h>
#include <linux/hyperv.h>
+#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
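+/*
+ * Map a present CPU number to its local APIC ID; without local APIC
+ * support there is no valid mapping, so warn once and return BAD_APICID.
+ */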
+static inline int kvm_cpu_get_apicid(int mps_cpu)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+ return __default_cpu_present_to_apicid(mps_cpu);
+#else
+ WARN_ON_ONCE(1);
+ return BAD_APICID;
+#endif
+}
+
#endif /* _ASM_X86_KVM_HOST_H */
"2:\n"
_ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
: : "c" (msr), "a"(low), "d" (high) : "memory");
- if (msr_tracepoint_active(__tracepoint_read_msr))
+ if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}
: "c" (msr), "0" (low), "d" (high),
[fault] "i" (-EIO)
: "memory");
- if (msr_tracepoint_active(__tracepoint_read_msr))
+ if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), err);
return err;
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
struct page *page;
- page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
if (!page)
return NULL;
if (!pgtable_pmd_page_ctor(page)) {
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ return (pud_t *)get_zeroed_page(GFP_KERNEL);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
return product;
}
-static __always_inline
-u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
-{
- u64 delta = rdtsc_ordered() - src->tsc_timestamp;
- return pvclock_scale_delta(delta, src->tsc_to_system_mul,
- src->tsc_shift);
-}
-
static __always_inline
unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
cycle_t *cycles, u8 *flags)
{
unsigned version;
- cycle_t ret, offset;
- u8 ret_flags;
+ cycle_t offset;
+ u64 delta;
version = src->version;
+ /* Make the latest version visible */
+ smp_rmb();
- offset = pvclock_get_nsec_offset(src);
- ret = src->system_time + offset;
- ret_flags = src->flags;
-
- *cycles = ret;
- *flags = ret_flags;
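+	/* Scale the TSC delta by the host-supplied mult/shift to get the nanosecond offset. */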
+ delta = rdtsc_ordered() - src->tsc_timestamp;
+ offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+ src->tsc_shift);
+ *cycles = src->system_time + offset;
+ *flags = src->flags;
return version;
}
struct thread_info;
struct stacktrace_ops;
-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+typedef unsigned long (*walk_stack_t)(struct task_struct *task,
unsigned long *stack,
unsigned long bp,
const struct stacktrace_ops *ops,
int *graph);
extern unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph);
extern unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph);
while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
i++;
- if (i == 0)
- return 0;
+ if (!i)
+ return -ENODEV;
nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
if (!nb)
res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
mem += IOAPIC_RESOURCE_NAME_SIZE;
+ ioapics[i].iomem_res = &res[num];
num++;
- ioapics[i].iomem_res = res;
}
ioapic_resources = res;
u64 value;
/* re-enable TopologyExtensions if switched off by BIOS */
- if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+ if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
!cpu_has(c, X86_FEATURE_TOPOEXT)) {
if (msr_set_bit(0xc0011005, 54) > 0) {
rdmsrl(0xc0011005, value);
if (value & BIT_64(54)) {
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
- pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+ pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
}
}
}
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
const struct stacktrace_ops *ops,
- struct thread_info *tinfo, int *graph)
+ struct task_struct *task, int *graph)
{
- struct task_struct *task;
unsigned long ret_addr;
int index;
if (addr != (unsigned long)return_to_handler)
return;
- task = tinfo->task;
index = task->curr_ret_stack;
if (!task->ret_stack || index < *graph)
static inline void
print_ftrace_graph_addr(unsigned long addr, void *data,
const struct stacktrace_ops *ops,
- struct thread_info *tinfo, int *graph)
+ struct task_struct *task, int *graph)
{ }
#endif
* severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
*/
-static inline int valid_stack_ptr(struct thread_info *tinfo,
+static inline int valid_stack_ptr(struct task_struct *task,
void *p, unsigned int size, void *end)
{
- void *t = tinfo;
+ void *t = task_stack_page(task);
if (end) {
if (p < end && p >= (end-THREAD_SIZE))
return 1;
}
unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph)
{
struct stack_frame *frame = (struct stack_frame *)bp;
- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+ while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
unsigned long addr;
addr = *stack;
} else {
ops->address(data, addr, 0);
}
- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+ print_ftrace_graph_addr(addr, data, ops, task, graph);
}
stack++;
}
EXPORT_SYMBOL_GPL(print_context_stack);
unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph)
struct stack_frame *frame = (struct stack_frame *)bp;
unsigned long *ret_addr = &frame->return_address;
- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+ while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) {
unsigned long addr = *ret_addr;
if (!__kernel_text_address(addr))
break;
frame = frame->next_frame;
ret_addr = &frame->return_address;
- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+ print_ftrace_graph_addr(addr, data, ops, task, graph);
}
return (unsigned long)frame;
bp = stack_frame(task, regs);
for (;;) {
- struct thread_info *context;
void *end_stack;
end_stack = is_hardirq_stack(stack, cpu);
if (!end_stack)
end_stack = is_softirq_stack(stack, cpu);
- context = task_thread_info(task);
- bp = ops->walk_stack(context, stack, bp, ops, data,
+ bp = ops->walk_stack(task, stack, bp, ops, data,
end_stack, &graph);
/* Stop if not on irq stack */
const struct stacktrace_ops *ops, void *data)
{
const unsigned cpu = get_cpu();
- struct thread_info *tinfo;
unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
unsigned long dummy;
unsigned used = 0;
* current stack address. If the stacks consist of nested
* exceptions
*/
- tinfo = task_thread_info(task);
while (!done) {
unsigned long *stack_end;
enum stack_type stype;
if (ops->stack(data, id) < 0)
break;
- bp = ops->walk_stack(tinfo, stack, bp, ops,
+ bp = ops->walk_stack(task, stack, bp, ops,
data, stack_end, &graph);
ops->stack(data, "<EOE>");
/*
if (ops->stack(data, "IRQ") < 0)
break;
- bp = ops->walk_stack(tinfo, stack, bp,
+ bp = ops->walk_stack(task, stack, bp,
ops, data, stack_end, &graph);
/*
* We link to the next stack (which would be
/*
* This handles the process stack:
*/
- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+ bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
put_cpu();
}
EXPORT_SYMBOL(dump_trace);
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/pci_ids.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
#include <drm/i915_drm.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/irq_remapping.h>
+#include <asm/early_ioremap.h>
+
+#define dev_err(msg) pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg)
static void __init fix_hypertransport_config(int num, int slot, int func)
{
{
#ifdef CONFIG_ACPI
#ifdef CONFIG_X86_IO_APIC
+ /*
+ * Only applies to Nvidia root ports (bus 0) and not to
+ * Nvidia graphics cards with PCI ports on secondary buses.
+ */
+ if (num)
+ return;
+
/*
* All timer overrides on Nvidia are
* wrong unless HPET is enabled.
#endif
}
+#define BCM4331_MMIO_SIZE 16384
+#define BCM4331_PM_CAP 0x40
+#define bcma_aread32(reg) ioread32(mmio + 1 * BCMA_CORE_SIZE + reg)
+#define bcma_awrite32(reg, val) iowrite32(val, mmio + 1 * BCMA_CORE_SIZE + reg)
+
+static void __init apple_airport_reset(int bus, int slot, int func)
+{
+ void __iomem *mmio;
+ u16 pmcsr;
+ u64 addr;
+ int i;
+
+ if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc."))
+ return;
+
+ /* Card may have been put into PCI_D3hot by grub quirk */
+ pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+
+ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ write_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL, pmcsr);
+ mdelay(10);
+
+ pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+ dev_err("Cannot power up Apple AirPort card\n");
+ return;
+ }
+ }
+
+ addr = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
+ addr |= (u64)read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1) << 32;
+ addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ mmio = early_ioremap(addr, BCM4331_MMIO_SIZE);
+ if (!mmio) {
+ dev_err("Cannot iomap Apple AirPort card\n");
+ return;
+ }
+
+ pr_info("Resetting Apple AirPort card (left enabled by EFI)\n");
+
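+	/* Poll for up to 30 x 10us for any in-flight core reset to complete. */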
+ for (i = 0; bcma_aread32(BCMA_RESET_ST) && i < 30; i++)
+ udelay(10);
+
+ bcma_awrite32(BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+ bcma_aread32(BCMA_RESET_CTL);
+ udelay(1);
+
+ bcma_awrite32(BCMA_RESET_CTL, 0);
+ bcma_aread32(BCMA_RESET_CTL);
+ udelay(10);
+
+ early_iounmap(mmio, BCM4331_MMIO_SIZE);
+}
#define QFLAG_APPLY_ONCE 0x1
#define QFLAG_APPLIED 0x2
void (*f)(int num, int slot, int func);
};
-/*
- * Only works for devices on the root bus. If you add any devices
- * not on bus 0 readd another loop level in early_quirks(). But
- * be careful because at least the Nvidia quirk here relies on
- * only matching on bus 0.
- */
static struct chipset early_qrk[] __initdata = {
{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
*/
{ PCI_VENDOR_ID_INTEL, 0x0f00,
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+ { PCI_VENDOR_ID_BROADCOM, 0x4331,
+ PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
{}
};
+static void __init early_pci_scan_bus(int bus);
+
/**
* check_dev_quirk - apply early quirks to a given PCI device
* @num: bus number
*
* Check the vendor & device ID against the early quirks table.
*
- * If the device is single function, let early_quirks() know so we don't
+ * If the device is single function, let early_pci_scan_bus() know so we don't
* poke at this device again.
*/
static int __init check_dev_quirk(int num, int slot, int func)
u16 vendor;
u16 device;
u8 type;
+ u8 sec;
int i;
class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
type = read_pci_config_byte(num, slot, func,
PCI_HEADER_TYPE);
+
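+	/* Descend into PCI-PCI bridges so quirks can also match devices on secondary buses. */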
+ if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
+ sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS);
+ if (sec > num)
+ early_pci_scan_bus(sec);
+ }
+
if (!(type & 0x80))
return -1;
return 0;
}
-void __init early_quirks(void)
+static void __init early_pci_scan_bus(int bus)
{
int slot, func;
- if (!early_pci_allowed())
- return;
-
/* Poor man's PCI discovery */
- /* Only scan the root bus */
for (slot = 0; slot < 32; slot++)
for (func = 0; func < 8; func++) {
/* Only probe function 0 on single fn devices */
- if (check_dev_quirk(0, slot, func))
+ if (check_dev_quirk(bus, slot, func))
break;
}
}
+
+void __init early_quirks(void)
+{
+ if (!early_pci_allowed())
+ return;
+
+ early_pci_scan_bus(0);
+}
# error "Need more than one PGD for the ESPFIX hack"
#endif
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
/* This contains the *bottom* address of the espfix stack */
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
void do_softirq_own_stack(void)
{
- struct thread_info *curstk;
struct irq_stack *irqstk;
u32 *isp, *prev_esp;
- curstk = current_stack();
irqstk = __this_cpu_read(softirq_stack);
/* build the stack frame on the softirq stack */
* normal page fault.
*/
regs->ip = (unsigned long)cur->addr;
+ /*
+ * Trap flag (TF) has been set here because this fault
+	 * happened where single stepping was to be done;
+	 * clear it before restoring the saved flags:
+ */
+ regs->flags &= ~X86_EFLAGS_TF;
+
+ /*
+ * If the TF flag was set before the kprobe hit,
+ * don't touch it:
+ */
regs->flags |= kcb->kprobe_old_flags;
+
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
{
unsigned version;
- cycle_t ret;
u8 flags;
do {
- version = __pvclock_read_cycles(src, &ret, &flags);
+ version = src->version;
+ /* Make the latest version visible */
+ smp_rmb();
+
+ flags = src->flags;
+ /* Make sure that the version double-check is last. */
+ smp_rmb();
} while ((src->version & 1) || version != src->version);
return flags & valid_flags;
do {
version = __pvclock_read_cycles(src, &ret, &flags);
+ /* Make sure that the version double-check is last. */
+ smp_rmb();
} while ((src->version & 1) || version != src->version);
if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
local_irq_disable();
}
+/*
+ * In IST context, we explicitly disable preemption. This serves two
+ * purposes: it makes it much less likely that we would accidentally
+ * schedule in IST context and it will force a warning if we somehow
+ * manage to schedule by accident.
+ */
void ist_enter(struct pt_regs *regs)
{
if (user_mode(regs)) {
rcu_nmi_enter();
}
- /*
- * We are atomic because we're on the IST stack; or we're on
- * x86_32, in which case we still shouldn't schedule; or we're
- * on x86_64 and entered from user mode, in which case we're
- * still atomic unless ist_begin_non_atomic is called.
- */
- preempt_count_add(HARDIRQ_OFFSET);
+ preempt_disable();
/* This code is a bit fragile. Test it. */
RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
void ist_exit(struct pt_regs *regs)
{
- preempt_count_sub(HARDIRQ_OFFSET);
+ preempt_enable_no_resched();
if (!user_mode(regs))
rcu_nmi_exit();
BUG_ON((unsigned long)(current_top_of_stack() -
current_stack_pointer()) >= THREAD_SIZE);
- preempt_count_sub(HARDIRQ_OFFSET);
+ preempt_enable_no_resched();
}
/**
*/
void ist_end_non_atomic(void)
{
- preempt_count_add(HARDIRQ_OFFSET);
+ preempt_disable();
}
static nokprobe_inline int
struct kvm_cpuid_entry __user *entries)
{
int r, i;
- struct kvm_cpuid_entry *cpuid_entries;
+ struct kvm_cpuid_entry *cpuid_entries = NULL;
r = -E2BIG;
if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
goto out;
r = -ENOMEM;
- cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
- if (!cpuid_entries)
- goto out;
- r = -EFAULT;
- if (copy_from_user(cpuid_entries, entries,
- cpuid->nent * sizeof(struct kvm_cpuid_entry)))
- goto out_free;
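+	/* nent may legitimately be zero; skip the allocation and copy-in for an empty list. */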
+ if (cpuid->nent) {
+ cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
+ cpuid->nent);
+ if (!cpuid_entries)
+ goto out;
+ r = -EFAULT;
+ if (copy_from_user(cpuid_entries, entries,
+ cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+ goto out;
+ }
for (i = 0; i < cpuid->nent; i++) {
vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
kvm_x86_ops->cpuid_update(vcpu);
r = kvm_update_cpuid(vcpu);
-out_free:
- vfree(cpuid_entries);
out:
+ vfree(cpuid_entries);
return r;
}
/* __delay is delay_tsc whenever the hardware has TSC, thus always. */
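+	/* Cap the busy-wait at the configured lapic_timer_advance_ns window. */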
if (guest_tsc < tsc_deadline)
- __delay(tsc_deadline - guest_tsc);
+ __delay(min(tsc_deadline - guest_tsc,
+ nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
}
static void start_apic_timer(struct kvm_lapic *apic)
#ifdef CONFIG_X86_64
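+/*
+ * SPTEs may be read locklessly, so stores go through WRITE_ONCE() to
+ * keep the compiler from tearing or re-issuing them.
+ */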
static void __set_spte(u64 *sptep, u64 spte)
{
- *sptep = spte;
+ WRITE_ONCE(*sptep, spte);
}
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
- *sptep = spte;
+ WRITE_ONCE(*sptep, spte);
}
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
*/
smp_wmb();
- ssptep->spte_low = sspte.spte_low;
+ WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
ssptep = (union split_spte *)sptep;
sspte = (union split_spte)spte;
- ssptep->spte_low = sspte.spte_low;
+ WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
/*
* If we map the spte from present to nonpresent, we should clear
/* enable / disable AVIC */
static int avic;
+#ifdef CONFIG_X86_LOCAL_APIC
module_param(avic, int, S_IRUGO);
+#endif
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
} else
kvm_disable_tdp();
- if (avic && (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)))
- avic = false;
-
- if (avic)
- pr_info("AVIC enabled\n");
+ if (avic) {
+ if (!npt_enabled ||
+ !boot_cpu_has(X86_FEATURE_AVIC) ||
+ !IS_ENABLED(CONFIG_X86_LOCAL_APIC))
+ avic = false;
+ else
+ pr_info("AVIC enabled\n");
+ }
return 0;
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
{
u64 entry;
- int h_physical_id = __default_cpu_present_to_apicid(vcpu->cpu);
+ int h_physical_id = kvm_cpu_get_apicid(vcpu->cpu);
struct vcpu_svm *svm = to_svm(vcpu);
if (!kvm_vcpu_apicv_active(vcpu))
{
u64 entry;
/* ID = 0xff (broadcast), ID > 0xff (reserved) */
- int h_physical_id = __default_cpu_present_to_apicid(cpu);
+ int h_physical_id = kvm_cpu_get_apicid(cpu);
struct vcpu_svm *svm = to_svm(vcpu);
if (!kvm_vcpu_apicv_active(vcpu))
if (avic_vcpu_is_running(vcpu))
wrmsrl(SVM_AVIC_DOORBELL,
- __default_cpu_present_to_apicid(vcpu->cpu));
+ kvm_cpu_get_apicid(vcpu->cpu));
else
kvm_vcpu_wake_up(vcpu);
}
unsigned int dest;
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
return;
do {
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
return;
/* Set SN when the vCPU is preempted */
/* Checks for #GP/#SS exceptions. */
exn = false;
- if (is_protmode(vcpu)) {
+ if (is_long_mode(vcpu)) {
+ /* Long mode: #GP(0)/#SS(0) if the memory address is in a
+ * non-canonical form. This is the only check on the memory
+ * destination for long mode!
+ */
+ exn = is_noncanonical_address(*ret);
+ } else if (is_protmode(vcpu)) {
/* Protected mode: apply checks for segment validity in the
* following order:
* - segment type check (#GP(0) may be thrown)
* execute-only code segment
*/
exn = ((s.type & 0xa) == 8);
- }
- if (exn) {
- kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
- return 1;
- }
- if (is_long_mode(vcpu)) {
- /* Long mode: #GP(0)/#SS(0) if the memory address is in a
- * non-canonical form. This is an only check for long mode.
- */
- exn = is_noncanonical_address(*ret);
- } else if (is_protmode(vcpu)) {
+ if (exn) {
+ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+ return 1;
+ }
/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
*/
exn = (s.unusable != 0);
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
return 0;
vcpu->pre_pcpu = vcpu->cpu;
unsigned long flags;
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
return;
do {
int idx, ret = -EINVAL;
if (!kvm_arch_has_assigned_device(kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(kvm->vcpus[0]))
return 0;
idx = srcu_read_lock(&kvm->irq_srcu);
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
static unsigned long max_tsc_khz;
-static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
-{
- return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
- vcpu->arch.virtual_tsc_shift);
-}
-
static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
u64 v = (u64)khz * (1000000 + ppm);
case MSR_AMD64_NB_CFG:
case MSR_FAM10H_MMIO_CONF_BASE:
case MSR_AMD64_BU_CFG2:
+ case MSR_IA32_PERF_CTL:
msr_info->data = 0;
break;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
| KVM_VCPUEVENT_VALID_SMM))
return -EINVAL;
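+	/* Only vectors 0-31 are valid exceptions, and NMI must not be injected via this path. */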
+ if (events->exception.injected &&
+ (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+ return -EINVAL;
+
process_nmi(vcpu);
vcpu->arch.exception.pending = events->exception.injected;
vcpu->arch.exception.nr = events->exception.nr;
if (dbgregs->flags)
return -EINVAL;
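+	/* Bits 63:32 of DR6 and DR7 are reserved; reject attempts to set them. */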
+ if (dbgregs->dr6 & ~0xffffffffull)
+ return -EINVAL;
+ if (dbgregs->dr7 & ~0xffffffffull)
+ return -EINVAL;
+
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
kvm_update_dr0123(vcpu);
vcpu->arch.dr6 = dbgregs->dr6;
slot = id_to_memslot(slots, id);
if (size) {
- if (WARN_ON(slot->npages))
+ if (slot->npages)
return -EEXIST;
/*
#define ARCH_X86_KVM_X86_H
#include <linux/kvm_host.h>
+#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
extern struct static_key kvm_no_apic_vcpu;
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
+{
+ return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
+ vcpu->arch.virtual_tsc_shift);
+}
+
/* Same "calling convention" as do_div:
* - divide (n << 32) by base
* - put result in n
void *data)
{
if (val == DIE_GPF) {
- pr_emerg("CONFIG_KASAN_INLINE enabled");
- pr_emerg("GPF could be caused by NULL-ptr deref or user memory access");
+ pr_emerg("CONFIG_KASAN_INLINE enabled\n");
+ pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
}
return NOTIFY_OK;
}
#include <asm/fixmap.h>
#include <asm/mtrr.h>
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
return -ENODEV;
printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
+ acpi_irq_penalty_init();
pcibios_enable_irq = acpi_pci_irq_enable;
pcibios_disable_irq = acpi_pci_irq_disable;
x86_init.pci.init_irq = x86_init_noop;
if (efi_enabled(EFI_OLD_MEMMAP))
return 0;
- gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+ gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
if (!efi_pgd)
return -ENOMEM;
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
+#include <asm/tlbflush.h>
/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);
* kernel's text (this value is passed in the image header).
*/
unsigned long restore_jump_address __visible;
+unsigned long jump_address_phys;
/*
* Value of the cr3 register from before the hibernation (this value is passed
pgd_t *temp_level4_pgt __visible;
-void *relocated_restore_code __visible;
+unsigned long relocated_restore_code __visible;
+
+static int set_up_temporary_text_mapping(void)
+{
+ pmd_t *pmd;
+ pud_t *pud;
+
+ /*
+ * The new mapping only has to cover the page containing the image
+ * kernel's entry point (jump_address_phys), because the switch over to
+ * it is carried out by relocated code running from a page allocated
+ * specifically for this purpose and covered by the identity mapping, so
+ * the temporary kernel text mapping is only needed for the final jump.
+ * Moreover, in that mapping the virtual address of the image kernel's
+ * entry point must be the same as its virtual address in the image
+ * kernel (restore_jump_address), so the image kernel's
+ * restore_registers() code doesn't find itself in a different area of
+ * the virtual address space after switching over to the original page
+ * tables used by the image kernel.
+ */
+ pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+ if (!pud)
+ return -ENOMEM;
+
+ pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+ if (!pmd)
+ return -ENOMEM;
+
+ set_pmd(pmd + pmd_index(restore_jump_address),
+ __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
+ set_pud(pud + pud_index(restore_jump_address),
+ __pud(__pa(pmd) | _KERNPG_TABLE));
+ set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
+ __pgd(__pa(pud) | _KERNPG_TABLE));
+
+ return 0;
+}
static void *alloc_pgt_page(void *context)
{
if (!temp_level4_pgt)
return -ENOMEM;
- /* It is safe to reuse the original kernel mapping */
- set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
- init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+ /* Prepare a temporary mapping for the kernel text */
+ result = set_up_temporary_text_mapping();
+ if (result)
+ return result;
/* Set up the direct mapping from scratch */
for (i = 0; i < nr_pfn_mapped; i++) {
return 0;
}
+static int relocate_restore_code(void)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+
+ relocated_restore_code = get_safe_page(GFP_ATOMIC);
+ if (!relocated_restore_code)
+ return -ENOMEM;
+
+ memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
+
+ /* Make the page containing the relocated code executable */
+ pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
+ pud = pud_offset(pgd, relocated_restore_code);
+ if (pud_large(*pud)) {
+ set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
+ } else {
+ pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
+
+ if (pmd_large(*pmd)) {
+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
+ } else {
+ pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
+
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
+ }
+ }
+ __flush_tlb_all();
+
+ return 0;
+}
+
int swsusp_arch_resume(void)
{
int error;
/* We have got enough memory and from now on we cannot recover */
- if ((error = set_up_temporary_mappings()))
+ error = set_up_temporary_mappings();
+ if (error)
return error;
- relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
- if (!relocated_restore_code)
- return -ENOMEM;
- memcpy(relocated_restore_code, &core_restore_code,
- &restore_registers - &core_restore_code);
+ error = relocate_restore_code();
+ if (error)
+ return error;
restore_image();
return 0;
struct restore_data_record {
unsigned long jump_address;
+ unsigned long jump_address_phys;
unsigned long cr3;
unsigned long magic;
};
-#define RESTORE_MAGIC 0x0123456789ABCDEFUL
+#define RESTORE_MAGIC 0x123456789ABCDEF0UL
/**
* arch_hibernation_header_save - populate the architecture specific part
if (max_size < sizeof(struct restore_data_record))
return -EOVERFLOW;
- rdr->jump_address = restore_jump_address;
+ rdr->jump_address = (unsigned long)&restore_registers;
+ rdr->jump_address_phys = __pa_symbol(&restore_registers);
rdr->cr3 = restore_cr3;
rdr->magic = RESTORE_MAGIC;
return 0;
struct restore_data_record *rdr = addr;
restore_jump_address = rdr->jump_address;
+ jump_address_phys = rdr->jump_address_phys;
restore_cr3 = rdr->cr3;
return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}
pushfq
popq pt_regs_flags(%rax)
- /* save the address of restore_registers */
- movq $restore_registers, %rax
- movq %rax, restore_jump_address(%rip)
/* save cr3 */
movq %cr3, %rax
movq %rax, restore_cr3(%rip)
ENDPROC(swsusp_arch_suspend)
ENTRY(restore_image)
- /* switch to temporary page tables */
- movq $__PAGE_OFFSET, %rdx
- movq temp_level4_pgt(%rip), %rax
- subq %rdx, %rax
- movq %rax, %cr3
- /* Flush TLB */
- movq mmu_cr4_features(%rip), %rax
- movq %rax, %rdx
- andq $~(X86_CR4_PGE), %rdx
- movq %rdx, %cr4; # turn off PGE
- movq %cr3, %rcx; # flush TLB
- movq %rcx, %cr3;
- movq %rax, %cr4; # turn PGE back on
-
/* prepare to jump to the image kernel */
- movq restore_jump_address(%rip), %rax
- movq restore_cr3(%rip), %rbx
+ movq restore_jump_address(%rip), %r8
+ movq restore_cr3(%rip), %r9
+
+ /* prepare to switch to temporary page tables */
+ movq temp_level4_pgt(%rip), %rax
+ movq mmu_cr4_features(%rip), %rbx
/* prepare to copy image data to their original locations */
movq restore_pblist(%rip), %rdx
+
+ /* jump to relocated restore code */
movq relocated_restore_code(%rip), %rcx
jmpq *%rcx
/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
+ /* switch to temporary page tables */
+ movq $__PAGE_OFFSET, %rcx
+ subq %rcx, %rax
+ movq %rax, %cr3
+ /* flush TLB */
+ movq %rbx, %rcx
+ andq $~(X86_CR4_PGE), %rcx
+ movq %rcx, %cr4; # turn off PGE
+ movq %cr3, %rcx; # flush TLB
+ movq %rcx, %cr3;
+ movq %rbx, %cr4; # turn PGE back on
.Lloop:
testq %rdx, %rdx
jz .Ldone
/* progress to the next pbe */
movq pbe_next(%rdx), %rdx
jmp .Lloop
+
.Ldone:
/* jump to the restore_registers address from the image header */
- jmpq *%rax
- /*
- * NOTE: This assumes that the boot kernel's text mapping covers the
- * image kernel's page containing restore_registers and the address of
- * this page is the same as in the image kernel's text mapping (it
- * should always be true, because the text mapping is linear, starting
- * from 0, and is supposed to cover the entire kernel text for every
- * kernel).
- *
- * code below belongs to the image kernel
- */
+ jmpq *%r8
+ /* code below belongs to the image kernel */
+ .align PAGE_SIZE
ENTRY(restore_registers)
FRAME_BEGIN
/* go back to the original page tables */
- movq %rbx, %cr3
+ movq %r9, %cr3
/* Flush TLB, including "global" things (vmalloc) */
movq mmu_cr4_features(%rip), %rax
/* NOTE: The loop is more greedy than the cleanup_highmap variant.
* We include the PMD passed in on _both_ boundaries. */
- for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+ for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
pmd++, vaddr += PMD_SIZE) {
if (pmd_none(*pmd))
continue;
#endif
}
-#ifdef CONFIG_X86_32
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
- /* If there's an existing pte, then don't allow _PAGE_RW to be set */
- if (pte_val_ma(*ptep) & _PAGE_PRESENT)
- pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
- pte_val_ma(pte));
-
- return pte;
-}
-#else /* CONFIG_X86_64 */
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
- unsigned long pfn;
-
- if (xen_feature(XENFEAT_writable_page_tables) ||
- xen_feature(XENFEAT_auto_translated_physmap) ||
- xen_start_info->mfn_list >= __START_KERNEL_map)
- return pte;
-
- /*
- * Pages belonging to the initial p2m list mapped outside the default
- * address range must be mapped read-only. This region contains the
- * page tables for mapping the p2m list, too, and page tables MUST be
- * mapped read-only.
- */
- pfn = pte_pfn(pte);
- if (pfn >= xen_start_info->first_p2m_pfn &&
- pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
- pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
-
- return pte;
-}
-#endif /* CONFIG_X86_64 */
-
/*
* Init-time set_pte while constructing initial pagetables, which
* doesn't allow RO page table pages to be remapped RW.
* so always write the PTE directly and rely on Xen trapping and
* emulating any updates as necessary.
*/
-static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+__visible pte_t xen_make_pte_init(pteval_t pte)
{
- if (pte_mfn(pte) != INVALID_P2M_ENTRY)
- pte = mask_rw_pte(ptep, pte);
- else
- pte = __pte_ma(0);
+#ifdef CONFIG_X86_64
+ unsigned long pfn;
+
+ /*
+ * Pages belonging to the initial p2m list mapped outside the default
+ * address range must be mapped read-only. This region contains the
+ * page tables for mapping the p2m list, too, and page tables MUST be
+ * mapped read-only.
+ */
+ pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+ if (xen_start_info->mfn_list < __START_KERNEL_map &&
+ pfn >= xen_start_info->first_p2m_pfn &&
+ pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
+ pte &= ~_PAGE_RW;
+#endif
+ pte = pte_pfn_to_mfn(pte);
+ return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
+static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+#ifdef CONFIG_X86_32
+ /* If there's an existing pte, then don't allow _PAGE_RW to be set */
+ if (pte_mfn(pte) != INVALID_P2M_ENTRY
+ && pte_val_ma(*ptep) & _PAGE_PRESENT)
+ pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+ pte_val_ma(pte));
+#endif
native_set_pte(ptep, pte);
}
pv_mmu_ops.alloc_pud = xen_alloc_pud;
pv_mmu_ops.release_pud = xen_release_pud;
#endif
+ pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
#ifdef CONFIG_X86_64
pv_mmu_ops.write_cr3 = &xen_write_cr3;
.pte_val = PV_CALLEE_SAVE(xen_pte_val),
.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
- .make_pte = PV_CALLEE_SAVE(xen_make_pte),
+ .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
#ifdef CONFIG_X86_PAE
if (unlikely(!slab_is_available()))
return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
- return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+ return (void *)__get_free_page(GFP_KERNEL);
}
static void __ref free_p2m_page(void *p)
pte_t *ptep;
int i;
- ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ ptep = (pte_t *)__get_free_page(GFP_KERNEL);
if (!ptep)
return NULL;
for (i = 0; i < 1024; i++)
ret = submit_bio_wait(type, bio);
if (ret == -EOPNOTSUPP)
ret = 0;
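+		/* submit_bio_wait() does not consume the bio; drop our reference here. */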
+ bio_put(bio);
}
blk_finish_plug(&plug);
}
}
- if (bio)
+ if (bio) {
ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+ bio_put(bio);
+ }
return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
}
}
- if (bio)
- return submit_bio_wait(WRITE, bio);
+ if (bio) {
+ ret = submit_bio_wait(WRITE, bio);
+ bio_put(bio);
+ return ret;
+ }
return 0;
}
blk_queue_split(q, &bio, q->bio_split);
- if (!is_flush_fua && !blk_queue_nomerges(q)) {
- if (blk_attempt_plug_merge(q, bio, &request_count,
- &same_queue_rq))
- return BLK_QC_T_NONE;
- } else
- request_count = blk_plug_queued_count(q);
+ if (!is_flush_fua && !blk_queue_nomerges(q) &&
+ blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+ return BLK_QC_T_NONE;
rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))
blk_queue_split(q, &bio, q->bio_split);
- if (!is_flush_fua && !blk_queue_nomerges(q) &&
- blk_attempt_plug_merge(q, bio, &request_count, NULL))
- return BLK_QC_T_NONE;
+ if (!is_flush_fua && !blk_queue_nomerges(q)) {
+ if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+ return BLK_QC_T_NONE;
+ } else
+ request_count = blk_plug_queued_count(q);
rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))
if (ret)
goto out;
ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
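+	/* Take task_lock() so p->io_context cannot be freed while we read its ioprio. */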
+ task_lock(p);
if (p->io_context)
ret = p->io_context->ioprio;
+ task_unlock(p);
out:
return ret;
}
tristate "Asymmetric public-key crypto algorithm subtype"
select MPILIB
select CRYPTO_HASH_INFO
+ select CRYPTO_AKCIPHER
help
This option provides support for asymmetric public key type handling.
If signature generation and/or verification are to be used,
struct pefile_context *ctx = context;
ctx->digest = kmemdup(value, vlen, GFP_KERNEL);
- return ctx->digest ? 0 : -ENOMEM;
+ if (!ctx->digest)
+ return -ENOMEM;
+
+ ctx->digest_len = vlen;
+
+ return 0;
}
if (asymmetric_key_id_same(p->id, auth))
goto found_issuer_check_skid;
}
- } else {
+ } else if (sig->auth_ids[1]) {
auth = sig->auth_ids[1];
pr_debug("- want %*phN\n", auth->len, auth->data);
for (p = pkcs7->certs; p; p = p->next) {
sig = payload->data[asym_auth];
if (!sig->auth_ids[0] && !sig->auth_ids[1])
- return 0;
+ return -ENOKEY;
if (ca_keyid && !asymmetric_key_id_partial(sig->auth_ids[1], ca_keyid))
return -EPERM;
[CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+ [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0,
};
};
struct pkcs1pad_request {
- struct akcipher_request child_req;
-
struct scatterlist in_sg[3], out_sg[2];
uint8_t *in_buf, *out_buf;
+
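+	/* child_req must stay last: the child transform's request context is laid out directly after it. */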
+ struct akcipher_request child_req;
};
static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
- acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
+ acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
return ret;
}
crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
- acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
+ acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
return n;
}
pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
pr->pblk = object.processor.pblk_address;
-
- /*
- * We don't care about error returns - we just try to mark
- * these reserved so that nobody else is confused into thinking
- * that this region might be unused..
- *
- * (In particular, allocating the IO range for Cardbus)
- */
- request_region(pr->throttling.address, 6, "ACPI CPU throttle");
}
/*
}
int acpi_video_get_levels(struct acpi_device *device,
- struct acpi_video_device_brightness **dev_br)
+ struct acpi_video_device_brightness **dev_br,
+ int *pmax_level)
{
union acpi_object *obj = NULL;
int i, max_level = 0, count = 0, level_ac_battery = 0;
br->count = count;
*dev_br = br;
+ if (pmax_level)
+ *pmax_level = max_level;
out:
kfree(obj);
struct acpi_video_device_brightness *br = NULL;
int result = -EINVAL;
- result = acpi_video_get_levels(device->dev, &br);
+ result = acpi_video_get_levels(device->dev, &br, &max_level);
if (result)
return result;
device->brightness = br;
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry) {
- if (!acpi_video_device_lcd_query_levels(dev, &levels))
+ if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels))
kfree(levels);
}
mutex_unlock(&video->device_list_lock);
static u8
acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
{
- u64 address;
-
if (!reg->access_width) {
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ max_bit_width = 32;
+ }
+
/*
* Detect old register descriptors where only the bit_width field
- * makes senses. The target address is copied to handle possible
- * alignment issues.
+		 * makes sense.
*/
- ACPI_MOVE_64_TO_64(&address, ®->address);
- if (!reg->bit_offset && reg->bit_width &&
+ if (reg->bit_width < max_bit_width &&
+ !reg->bit_offset && reg->bit_width &&
ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
- ACPI_IS_ALIGNED(reg->bit_width, 8) &&
- ACPI_IS_ALIGNED(address, reg->bit_width)) {
+ ACPI_IS_ALIGNED(reg->bit_width, 8)) {
return (reg->bit_width);
- } else {
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- return (32);
- } else {
- return (max_bit_width);
- }
}
+ return (max_bit_width);
} else {
return (1 << (reg->access_width + 2));
}
acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
{
u64 address;
- u8 access_width;
- u32 bit_width;
- u8 bit_offset;
- u64 value64;
- u32 new_value32, old_value32;
- u8 index;
acpi_status status;
ACPI_FUNCTION_NAME(hw_write);
return (status);
}
- /* Convert access_width into number of bits based */
-
- access_width = acpi_hw_get_access_bit_width(reg, 32);
- bit_width = reg->bit_offset + reg->bit_width;
- bit_offset = reg->bit_offset;
-
/*
* Two address spaces supported: Memory or IO. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
- index = 0;
- while (bit_width) {
- /*
- * Use offset style bit reads because "Index * AccessWidth" is
- * ensured to be less than 32-bits by acpi_hw_validate_register().
- */
- new_value32 = ACPI_GET_BITS(&value, index * access_width,
- ACPI_MASK_BITS_ABOVE_32
- (access_width));
-
- if (bit_offset >= access_width) {
- bit_offset -= access_width;
- } else {
- /*
- * Use offset style bit masks because access_width is ensured
- * to be less than 32-bits by acpi_hw_validate_register() and
- * bit_offset/bit_width is less than access_width here.
- */
- if (bit_offset) {
- new_value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
- }
- if (bit_width < access_width) {
- new_value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
- }
-
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- if (bit_offset || bit_width < access_width) {
- /*
- * Read old values in order not to modify the bits that
- * are beyond the register bit_width/bit_offset setting.
- */
- status =
- acpi_os_read_memory((acpi_physical_address)
- address +
- index *
- ACPI_DIV_8
- (access_width),
- &value64,
- access_width);
- old_value32 = (u32)value64;
-
- /*
- * Use offset style bit masks because access_width is
- * ensured to be less than 32-bits by
- * acpi_hw_validate_register() and bit_offset/bit_width is
- * less than access_width here.
- */
- if (bit_offset) {
- old_value32 &=
- ACPI_MASK_BITS_ABOVE
- (bit_offset);
- bit_offset = 0;
- }
- if (bit_width < access_width) {
- old_value32 &=
- ACPI_MASK_BITS_BELOW
- (bit_width);
- }
-
- new_value32 |= old_value32;
- }
-
- value64 = (u64)new_value32;
- status =
- acpi_os_write_memory((acpi_physical_address)
- address +
- index *
- ACPI_DIV_8
- (access_width),
- value64, access_width);
- } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
-
- if (bit_offset || bit_width < access_width) {
- /*
- * Read old values in order not to modify the bits that
- * are beyond the register bit_width/bit_offset setting.
- */
- status =
- acpi_hw_read_port((acpi_io_address)
- address +
- index *
- ACPI_DIV_8
- (access_width),
- &old_value32,
- access_width);
-
- /*
- * Use offset style bit masks because access_width is
- * ensured to be less than 32-bits by
- * acpi_hw_validate_register() and bit_offset/bit_width is
- * less than access_width here.
- */
- if (bit_offset) {
- old_value32 &=
- ACPI_MASK_BITS_ABOVE
- (bit_offset);
- bit_offset = 0;
- }
- if (bit_width < access_width) {
- old_value32 &=
- ACPI_MASK_BITS_BELOW
- (bit_width);
- }
-
- new_value32 |= old_value32;
- }
-
- status = acpi_hw_write_port((acpi_io_address)
- address +
- index *
- ACPI_DIV_8
- (access_width),
- new_value32,
- access_width);
- }
- }
-
- /*
- * Index * access_width is ensured to be less than 32-bits by
- * acpi_hw_validate_register().
- */
- bit_width -=
- bit_width > access_width ? access_width : bit_width;
- index++;
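+	/* Write the register in a single access of bit_width bits. */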
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ status = acpi_os_write_memory((acpi_physical_address)
+ address, (u64)value,
+ reg->bit_width);
+ } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+ status = acpi_hw_write_port((acpi_io_address)
+ address, value, reg->bit_width);
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
"Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
- value, access_width, ACPI_FORMAT_UINT64(address),
+ value, reg->bit_width, ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (status);
* Maybe EC region is required at bus_scan/acpi_get_devices. So it
* is necessary to enable it as early as possible.
*/
- acpi_boot_ec_enable();
+ acpi_ec_dsdt_probe();
printk(KERN_INFO PREFIX "Interpreter enabled\n");
static void ec_remove_handlers(struct acpi_ec *ec)
{
- acpi_ec_stop(ec, false);
-
if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
}
+ /*
+ * Stops handling the EC transactions after removing the operation
+ * region handler. This is required because _REG(DISCONNECT)
+ * invoked during the removal can result in new EC transactions.
+ *
+ * Flushes the EC requests and thus disables the GPE before
+ * removing the GPE handler. This is required by the current ACPICA
+ * GPE core. ACPICA GPE core will automatically disable a GPE when
+ * it is indicated but there is no way to handle it. So the drivers
+ * must disable the GPEs prior to removing the GPE handlers.
+ */
+ acpi_ec_stop(ec, false);
+
if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler)))
return AE_OK;
}
-int __init acpi_boot_ec_enable(void)
+static const struct acpi_device_id ec_device_ids[] = {
+ {"PNP0C09", 0},
+ {"", 0},
+};
+
+int __init acpi_ec_dsdt_probe(void)
{
- if (!boot_ec)
+ acpi_status status;
+
+ if (boot_ec)
return 0;
+
+ /*
+ * Finding EC from DSDT if there is no ECDT EC available. When this
+ * function is invoked, ACPI tables have been fully loaded, we can
+ * walk namespace now.
+ */
+ boot_ec = make_acpi_ec();
+ if (!boot_ec)
+ return -ENOMEM;
+ status = acpi_get_devices(ec_device_ids[0].id,
+ ec_parse_device, boot_ec, NULL);
+ if (ACPI_FAILURE(status) || !boot_ec->handle)
+ return -ENODEV;
if (!ec_install_handlers(boot_ec)) {
first_ec = boot_ec;
return 0;
return -EFAULT;
}
-static const struct acpi_device_id ec_device_ids[] = {
- {"PNP0C09", 0},
- {"", 0},
-};
-
#if 0
/*
* Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
-int acpi_boot_ec_enable(void);
+int acpi_ec_dsdt_probe(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
void acpi_ec_unblock_transactions_early(void);
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code));
+ return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);
continue;
if (nfit_dcr->dcr->code == dcr->code)
continue;
- rc = sprintf(buf, "%#x\n",
- be16_to_cpu(nfit_dcr->dcr->code));
+ rc = sprintf(buf, "0x%04x\n",
+ le16_to_cpu(nfit_dcr->dcr->code));
break;
}
if (rc != ENXIO)
if (disable_vendor_specific)
dsm_mask &= ~(1 << 8);
} else {
- dev_err(dev, "unknown dimm command family\n");
+ dev_dbg(dev, "unknown dimm command family\n");
nfit_mem->family = -1;
- return force_enable_dimms ? 0 : -ENODEV;
+ /* DSMs are optional, continue loading the driver... */
+ return 0;
}
uuid = to_nfit_uuid(nfit_mem->family);
};
/*
- * Region format interface codes are stored as an array of bytes in the
- * NFIT DIMM Control Region structure
+ * Region format interface codes are stored with the interface as the
+ * LSB and the function as the MSB.
*/
-#define NFIT_FIC_BYTE cpu_to_be16(0x101) /* byte-addressable energy backed */
-#define NFIT_FIC_BLK cpu_to_be16(0x201) /* block-addressable non-energy backed */
-#define NFIT_FIC_BYTEN cpu_to_be16(0x301) /* byte-addressable non-energy backed */
+#define NFIT_FIC_BYTE cpu_to_le16(0x101) /* byte-addressable energy backed */
+#define NFIT_FIC_BLK cpu_to_le16(0x201) /* block-addressable non-energy backed */
+#define NFIT_FIC_BYTEN cpu_to_le16(0x301) /* byte-addressable non-energy backed */
enum {
NFIT_BLK_READ_FLUSH = 1,
{
struct acpi_pci_link *link;
int penalty = 0;
+ int i;
list_for_each_entry(link, &acpi_link_list, list) {
/*
*/
if (link->irq.active && link->irq.active == irq)
penalty += PIRQ_PENALTY_PCI_USING;
- else {
- int i;
-
- /*
- * If a link is inactive, penalize the IRQs it
- * might use, but not as severely.
- */
- for (i = 0; i < link->irq.possible_count; i++)
- if (link->irq.possible[i] == irq)
- penalty += PIRQ_PENALTY_PCI_POSSIBLE /
- link->irq.possible_count;
- }
+
+ /*
+ * penalize the IRQs PCI might use, but not as severely.
+ */
+ for (i = 0; i < link->irq.possible_count; i++)
+ if (link->irq.possible[i] == irq)
+ penalty += PIRQ_PENALTY_PCI_POSSIBLE /
+ link->irq.possible_count;
}
return penalty;
{
int penalty = 0;
- if (irq < ACPI_MAX_ISA_IRQS)
- penalty += acpi_isa_irq_penalty[irq];
-
/*
* Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
* with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
penalty += PIRQ_PENALTY_PCI_USING;
}
+ if (irq < ACPI_MAX_ISA_IRQS)
+ return penalty + acpi_isa_irq_penalty[irq];
+
penalty += acpi_irq_pci_sharing_penalty(irq);
return penalty;
}
+int __init acpi_irq_penalty_init(void)
+{
+ struct acpi_pci_link *link;
+ int i;
+
+ /*
+ * Update penalties to facilitate IRQ balancing.
+ */
+ list_for_each_entry(link, &acpi_link_list, list) {
+
+ /*
+ * reflect the possible and active irqs in the penalty table --
+ * useful for breaking ties.
+ */
+ if (link->irq.possible_count) {
+ int penalty =
+ PIRQ_PENALTY_PCI_POSSIBLE /
+ link->irq.possible_count;
+
+ for (i = 0; i < link->irq.possible_count; i++) {
+ if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS)
+ acpi_isa_irq_penalty[link->irq.
+ possible[i]] +=
+ penalty;
+ }
+
+ } else if (link->irq.active &&
+ (link->irq.active < ACPI_MAX_ISA_IRQS)) {
+ acpi_isa_irq_penalty[link->irq.active] +=
+ PIRQ_PENALTY_PCI_POSSIBLE;
+ }
+ }
+
+ return 0;
+}
+
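As a rough illustration of the weighting in acpi_irq_penalty_init() above: a link that could use N IRQs adds an equal share of the penalty to each candidate. A hedged userspace sketch (the penalty constant and IRQ list are made up for the example):

#include <stdio.h>

#define PIRQ_PENALTY_PCI_POSSIBLE 0x1000 /* illustrative value only */

int main(void)
{
	int penalty[16] = { 0 };
	int possible[] = { 5, 9, 11 };   /* IRQs one link could use */
	int n = sizeof(possible) / sizeof(possible[0]);
	int i;

	/* Each candidate IRQ absorbs an equal share of the penalty,
	 * mirroring the loop in acpi_irq_penalty_init(). */
	for (i = 0; i < n; i++)
		penalty[possible[i]] += PIRQ_PENALTY_PCI_POSSIBLE / n;

	for (i = 0; i < 16; i++)
		if (penalty[i])
			printf("irq %2d penalty 0x%x\n", i, penalty[i]);
	return 0;
}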
static int acpi_irq_balance = -1; /* 0: static, 1: balance */
static int acpi_pci_link_allocate(struct acpi_pci_link *link)
{
if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
- active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING;
+ (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
}
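The added parentheses matter because the conditional operator binds more loosely than addition; without them the whole sum becomes the condition. A minimal userspace sketch (not kernel code, illustrative values only):

#include <stdio.h>

int main(void)
{
	int base = 10, active = 0;

	/* Without parentheses this parses as (base + active) ? 4 : 8,
	 * so the base penalty is discarded entirely. */
	int wrong = base + active ? 4 : 8;
	/* The intended computation, as in the fixed line above: */
	int right = base + (active ? 4 : 8);

	printf("wrong=%d right=%d\n", wrong, right); /* wrong=4 right=18 */
	return 0;
}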
bool acpi_isa_irq_available(int irq)
if (!pr->flags.throttling)
return -ENODEV;
+ /*
+ * We don't care about error returns - we just try to mark
+ * these reserved so that nobody else is confused into thinking
+ * that this region might be unused.
+ *
+ * (In particular, allocating the IO range for Cardbus)
+ */
+ request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+
pr->throttling.state = 0;
duty_mask = pr->throttling.state_count - 1;
u32 val;
plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
- if (IS_ERR(plat_data))
+ if (!plat_data)
return &ahci_port_info;
plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
*/
{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+ /*
+ * Device times out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+ */
+ { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
ata_scsi_port_error_handler(host, ap);
/* finish or retry handled scmd's and clean up */
- WARN_ON(host->host_failed || !list_empty(&eh_work_q));
+ WARN_ON(!list_empty(&eh_work_q));
DPRINTK("EXIT\n");
}
* Looks like a lot of fuss, but it avoids an unnecessary
* +1 usec read-after-write delay for unaffected registers.
*/
- laddr = (long)addr & 0xffff;
+ laddr = (unsigned long)addr & 0xffff;
if (laddr >= 0x300 && laddr <= 0x33c) {
laddr &= 0x000f;
if (laddr == 0x4 || laddr == 0xc) {
"reserved 27",
"reserved 28",
"reserved 29",
- "reserved 30",
+ "reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
"reassembly abort: no buffers",
"receive buffer overflow",
"change in GFC",
"receive buffer full",
"low priority discard - no receive descriptor",
"low priority discard - missing end of packet",
+ "reserved 37",
+ "reserved 38",
+ "reserved 39",
+ "reserved 40",
"reserved 41",
"reserved 42",
"reserved 43",
/* make the ptr point to the corresponding buffer desc entry */
buf_desc_ptr += desc;
if (!desc || (desc > iadev->num_rx_desc) ||
- ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
+ ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
free_desc(dev, desc);
IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
return -1;
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_ISA) += isa.o
+obj-$(CONFIG_ISA_BUS_API) += isa.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
return error;
}
-device_initcall(isa_bus_init);
+postcore_initcall(isa_bus_init);
static void module_create_drivers_dir(struct module_kobject *mk)
{
- if (!mk || mk->drivers_dir)
- return;
+ static DEFINE_MUTEX(drivers_dir_mutex);
- mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+ mutex_lock(&drivers_dir_mutex);
+ if (mk && !mk->drivers_dir)
+ mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+ mutex_unlock(&drivers_dir_mutex);
}
void module_add_driver(struct module *mod, struct device_driver *drv)
}
/* Mark opp-table as multiple CPUs are sharing it now */
- opp_table->shared_opp = true;
+ opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
}
unlock:
mutex_unlock(&opp_table_lock);
*
* This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
*
- * Returns -ENODEV if OPP table isn't already present.
+ * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP
+ * table's status is access-unknown.
*
* Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
goto unlock;
}
+ if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
cpumask_clear(cpumask);
- if (opp_table->shared_opp) {
+ if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
list_for_each_entry(opp_dev, &opp_table->dev_list, node)
cpumask_set_cpu(opp_dev->dev->id, cpumask);
} else {
* But the OPPs will be considered as shared only if the
* OPP table contains a "opp-shared" property.
*/
- return opp_table->shared_opp ? opp_table : NULL;
+ if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
+ return opp_table;
+
+ return NULL;
}
}
}
opp_table->np = opp_np;
- opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+ if (of_property_read_bool(opp_np, "opp-shared"))
+ opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
+ else
+ opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
mutex_unlock(&opp_table_lock);
#endif
};
+enum opp_table_access {
+ OPP_TABLE_ACCESS_UNKNOWN = 0,
+ OPP_TABLE_ACCESS_EXCLUSIVE = 1,
+ OPP_TABLE_ACCESS_SHARED = 2,
+};
+
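A short sketch of why the tri-state above replaces a bool: a zero-initialized table with a plain bool would silently read as "exclusive", whereas the enum keeps "unknown" distinguishable so it can be rejected with -EINVAL as done above. Userspace illustration (hypothetical helper, not from the kernel):

#include <stdio.h>

enum opp_table_access {
	OPP_TABLE_ACCESS_UNKNOWN = 0,
	OPP_TABLE_ACCESS_EXCLUSIVE = 1,
	OPP_TABLE_ACCESS_SHARED = 2,
};

static const char *access_name(enum opp_table_access a)
{
	switch (a) {
	case OPP_TABLE_ACCESS_EXCLUSIVE: return "exclusive";
	case OPP_TABLE_ACCESS_SHARED:    return "shared";
	default:                         return "unknown (reject with -EINVAL)";
	}
}

int main(void)
{
	enum opp_table_access a = OPP_TABLE_ACCESS_UNKNOWN; /* zeroed alloc */

	printf("%s\n", access_name(a));
	return 0;
}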
/**
* struct opp_table - Device opp structure
* @node: table node - contains the devices with OPPs that
/* For backward compatibility with v1 bindings */
unsigned int voltage_tolerance_v1;
- bool shared_opp;
+ enum opp_table_access shared_opp;
struct dev_pm_opp *suspend_opp;
unsigned int *supported_hw;
}
EXPORT_SYMBOL_GPL(device_get_next_child_node);
+/**
+ * device_get_named_child_node - Return first matching named child node handle
+ * @dev: Device to find the named child node for.
+ * @childname: String to match child node name against.
+ */
+struct fwnode_handle *device_get_named_child_node(struct device *dev,
+ const char *childname)
+{
+ struct fwnode_handle *child;
+
+ /*
+ * Find first matching named child node of this device.
+ * For ACPI this will be a data only sub-node.
+ */
+ device_for_each_child_node(dev, child) {
+ if (is_of_node(child)) {
+ if (!of_node_cmp(to_of_node(child)->name, childname))
+ return child;
+ } else if (is_acpi_data_node(child)) {
+ if (acpi_data_node_match(child, childname))
+ return child;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(device_get_named_child_node);
+
/**
* fwnode_handle_put - Drop reference to a device node
* @fwnode: Pointer to the device node to drop the reference to.
#include <linux/bcma/bcma.h>
#include <linux/delay.h>
-#define BCMA_CORE_SIZE 0x1000
-
#define bcma_err(bus, fmt, ...) \
pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_warn(bus, fmt, ...) \
int ret;
/* get_zeroed_page returns page with ref count 1 */
- p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+ p = (void *) get_zeroed_page(GFP_KERNEL);
if (!p)
return -ENOMEM;
empty_page = virt_to_page(p);
debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
- debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
+ debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
return 0;
}
struct blk_mq_tag_set tag_set;
struct blkfront_ring_info *rinfo;
unsigned int nr_rings;
+ /* Save incomplete reqs and bios for migration. */
+ struct list_head requests;
+ struct bio_list bio_list;
};
static unsigned int nr_minors;
const struct blk_mq_queue_data *qd)
{
unsigned long flags;
- struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
+ int qid = hctx->queue_num;
+ struct blkfront_info *info = hctx->queue->queuedata;
+ struct blkfront_ring_info *rinfo = NULL;
+ BUG_ON(info->nr_rings <= qid);
+ rinfo = &info->rinfo[qid];
blk_mq_start_request(qd->rq);
spin_lock_irqsave(&rinfo->ring_lock, flags);
if (RING_FULL(&rinfo->ring))
return BLK_MQ_RQ_QUEUE_BUSY;
}
-static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int index)
-{
- struct blkfront_info *info = (struct blkfront_info *)data;
-
- BUG_ON(info->nr_rings <= index);
- hctx->driver_data = &info->rinfo[index];
- return 0;
-}
-
static struct blk_mq_ops blkfront_mq_ops = {
.queue_rq = blkif_queue_rq,
.map_queue = blk_mq_map_queue,
- .init_hctx = blk_mq_init_hctx,
};
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
return PTR_ERR(rq);
}
+ rq->queuedata = info;
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
if (info->feature_discard) {
{
unsigned int i, r_index;
struct request *req, *n;
- struct blk_shadow *copy;
int rc;
struct bio *bio, *cloned_bio;
- struct bio_list bio_list, merge_bio;
unsigned int segs, offset;
int pending, size;
struct split_bio *split_bio;
- struct list_head requests;
blkfront_gather_backend_features(info);
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
blk_queue_max_segments(info->rq, segs);
- bio_list_init(&bio_list);
- INIT_LIST_HEAD(&requests);
for (r_index = 0; r_index < info->nr_rings; r_index++) {
- struct blkfront_ring_info *rinfo;
-
- rinfo = &info->rinfo[r_index];
- /* Stage 1: Make a safe copy of the shadow state. */
- copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
- GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
- if (!copy)
- return -ENOMEM;
-
- /* Stage 2: Set up free list. */
- memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
- for (i = 0; i < BLK_RING_SIZE(info); i++)
- rinfo->shadow[i].req.u.rw.id = i+1;
- rinfo->shadow_free = rinfo->ring.req_prod_pvt;
- rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
+ struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
rc = blkfront_setup_indirect(rinfo);
- if (rc) {
- kfree(copy);
+ if (rc)
return rc;
- }
-
- for (i = 0; i < BLK_RING_SIZE(info); i++) {
- /* Not in use? */
- if (!copy[i].request)
- continue;
-
- /*
- * Get the bios in the request so we can re-queue them.
- */
- if (copy[i].request->cmd_flags &
- (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
- /*
- * Flush operations don't contain bios, so
- * we need to requeue the whole request
- */
- list_add(©[i].request->queuelist, &requests);
- continue;
- }
- merge_bio.head = copy[i].request->bio;
- merge_bio.tail = copy[i].request->biotail;
- bio_list_merge(&bio_list, &merge_bio);
- copy[i].request->bio = NULL;
- blk_end_request_all(copy[i].request, 0);
- }
-
- kfree(copy);
}
xenbus_switch_state(info->xbdev, XenbusStateConnected);
kick_pending_request_queues(rinfo);
}
- list_for_each_entry_safe(req, n, &requests, queuelist) {
+ list_for_each_entry_safe(req, n, &info->requests, queuelist) {
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
BUG_ON(req->nr_phys_segments > segs);
}
blk_mq_kick_requeue_list(info->rq);
- while ((bio = bio_list_pop(&bio_list)) != NULL) {
+ while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
/* Traverse the list of pending bios and re-queue them */
if (bio_segments(bio) > segs) {
/*
{
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
int err = 0;
+ unsigned int i, j;
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
+ bio_list_init(&info->bio_list);
+ INIT_LIST_HEAD(&info->requests);
+ for (i = 0; i < info->nr_rings; i++) {
+ struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ struct bio_list merge_bio;
+ struct blk_shadow *shadow = rinfo->shadow;
+
+ for (j = 0; j < BLK_RING_SIZE(info); j++) {
+ /* Not in use? */
+ if (!shadow[j].request)
+ continue;
+
+ /*
+ * Get the bios in the request so we can re-queue them.
+ */
+ if (shadow[j].request->cmd_flags &
+ (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+ /*
+ * Flush operations don't contain bios, so
+ * we need to requeue the whole request
+ */
+ list_add(&shadow[j].request->queuelist, &info->requests);
+ continue;
+ }
+ merge_bio.head = shadow[j].request->bio;
+ merge_bio.tail = shadow[j].request->biotail;
+ bio_list_merge(&info->bio_list, &merge_bio);
+ shadow[j].request->bio = NULL;
+ blk_mq_end_request(shadow[j].request, 0);
+ }
+ }
+
blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
err = negotiate_mq(info);
return err;
err = talk_to_blkback(dev, info);
+ if (!err)
+ blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
/*
* We have to wait for the backend to switch to
break;
case XenbusStateConnected:
- if (dev->state != XenbusStateInitialised) {
+ /*
+ * talk_to_blkback sets state to XenbusStateInitialised
+ * and blkfront_connect sets it to XenbusStateConnected
+ * (if connection went OK).
+ *
+ * If the backend (or toolstack) decides to poke at backend
+ * state (and re-trigger the watch by setting the state repeatedly
+ * to XenbusStateConnected (4)), we need to deal with this.
+ * This is allowed as it is used to communicate to the guest
+ * that the size of the disk has changed!
+ */
+ if ((dev->state != XenbusStateInitialised) &&
+ (dev->state != XenbusStateConnected)) {
if (talk_to_blkback(dev, info))
break;
}
+
blkfront_connect(info);
break;
while (!list_empty(&intf->waiting_rcv_msgs)) {
smi_msg = list_entry(intf->waiting_rcv_msgs.next,
struct ipmi_smi_msg, link);
+ list_del(&smi_msg->link);
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
flags);
if (rv > 0) {
/*
* To preserve message order, quit if we
- * can't handle a message.
+ * can't handle a message. Add the message
+ * back at the head; this is safe because this
+ * tasklet is the only thing that pulls the
+ * messages.
*/
+ list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
break;
} else {
- list_del(&smi_msg->link);
if (rv == 0)
/* Message handled */
ipmi_free_smi_msg(smi_msg);
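The ordering rule described in the comment above boils down to pop-from-head, and push-back-at-head on failure. A minimal userspace list sketch (hypothetical types, not the kernel's list_head API):

#include <stdio.h>

struct msg { int seq; struct msg *next; };

static struct msg *pop_head(struct msg **q)
{
	struct msg *m = *q;

	if (m)
		*q = m->next;
	return m;
}

static void push_head(struct msg **q, struct msg *m)
{
	m->next = *q;
	*q = m;
}

int main(void)
{
	struct msg c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct msg *queue = &a;

	struct msg *m = pop_head(&queue);
	/* Pretend the handler returned "busy": re-queue at the head. */
	push_head(&queue, m);

	for (m = queue; m; m = m->next)
		printf("seq %d\n", m->seq); /* still 1, 2, 3 */
	return 0;
}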
config COMMON_CLK_NXP
def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
select REGMAP_MMIO if ARCH_LPC32XX
+ select MFD_SYSCON if ARCH_LPC18XX
---help---
Support for clock providers on NXP platforms.
struct clk_programmable *prog = to_clk_programmable(hw);
const struct clk_programmable_layout *layout = prog->layout;
unsigned int mask = layout->css_mask;
- unsigned int pckr = 0;
+ unsigned int pckr = index;
if (layout->have_slck_mck)
mask |= AT91_PMC_CSSMCK_MCK;
return -ENOMEM;
regmap = syscon_node_to_regmap(of_get_parent(np));
- if (!regmap) {
+ if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "failed to have parent regmap\n");
- return -EINVAL;
+ return PTR_ERR(regmap);
}
for (i = 0; i < ARRAY_SIZE(clk_oxnas_init); i++) {
/* register fixed rate clocks */
clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
- CLK_IS_ROOT, 24000000);
+ 0, 24000000);
clks[FRCCLK] = clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
- CLK_IS_ROOT, 8000000);
+ 0, 8000000);
clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
- CLK_IS_ROOT, 8000000);
+ 0, 8000000);
clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
- CLK_IS_ROOT, 32000);
+ 0, 32000);
clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
- CLK_IS_ROOT, 24000000);
+ 0, 24000000);
/* fixed rate (optional) clock */
if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
pr_info("pic32-clk: dt requests SOSC.\n");
}
cclk = clk_register(NULL, &cpuclk->hw);
- if (IS_ERR(clk)) {
+ if (IS_ERR(cclk)) {
pr_err("%s: could not register cpuclk %s\n", __func__, name);
- ret = PTR_ERR(clk);
+ ret = PTR_ERR(cclk);
goto free_rate_table;
}
#define ROCKCHIP_MMC_DEGREE_MASK 0x3
#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
-#define ROCKCHIP_MMC_INIT_STATE_RESET 0x1
-#define ROCKCHIP_MMC_INIT_STATE_SHIFT 1
#define PSECS_PER_SEC 1000000000000LL
return ERR_PTR(-ENOMEM);
init.name = name;
+ init.flags = 0;
init.num_parents = num_parents;
init.parent_names = parent_names;
init.ops = &rockchip_mmc_clk_ops;
mmc_clock->reg = reg;
mmc_clock->shift = shift;
- /*
- * Assert init_state to soft reset the CLKGEN
- * for mmc tuning phase and degree
- */
- if (mmc_clock->shift == ROCKCHIP_MMC_INIT_STATE_SHIFT)
- writel(HIWORD_UPDATE(ROCKCHIP_MMC_INIT_STATE_RESET,
- ROCKCHIP_MMC_INIT_STATE_RESET,
- mmc_clock->shift), mmc_clock->reg);
-
clk = clk_register(NULL, &mmc_clock->hw);
if (IS_ERR(clk))
kfree(mmc_clock);
RK3399_CLKGATE_CON(13), 1, GFLAGS),
/* perihp */
- GATE(0, "cpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
+ GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(5), 0, GFLAGS),
- GATE(0, "gpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
+ GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(5), 1, GFLAGS),
COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
static const char *const rk3399_cru_critical_clocks[] __initconst = {
"aclk_cci_pre",
+ "aclk_gic",
+ "aclk_gic_noc",
"pclk_perilp0",
"pclk_perilp0",
"hclk_perilp0",
ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
return;
}
ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip pmu clk init failed\n", __func__);
+ iounmap(reg_base);
return;
}
u8 width_div;
u8 width_mux;
+
+ u32 flags;
};
struct reset_data {
data->has_div ? &div->hw : NULL,
data->has_div ? &clk_divider_ops : NULL,
&gate->hw, &clk_gate_ops,
- 0);
+ data->flags);
if (IS_ERR(clk)) {
pr_err("%s: Couldn't register the clock\n", clk_name);
goto free_div;
.offset_rst = 29,
.offset_mux = 24,
.width_mux = 2,
+ .flags = CLK_SET_RATE_PARENT,
};
static void __init sun4i_a10_tcon_ch0_setup(struct device_node *node)
static u8 tcon_ch1_get_parent(struct clk_hw *hw)
{
struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
- int num_parents = clk_hw_get_num_parents(hw);
u32 reg;
reg = readl(tclk->reg) >> TCON_CH1_SCLK2_MUX_SHIFT;
reg &= reg >> TCON_CH1_SCLK2_MUX_MASK;
- if (reg >= num_parents)
- return -EINVAL;
-
return reg;
}
/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
-static inline void get_seq(__u32 *ts, int *cpu)
+static inline void send_msg(struct cn_msg *msg)
{
preempt_disable();
- *ts = __this_cpu_inc_return(proc_event_counts) - 1;
- *cpu = smp_processor_id();
+
+ msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
+ ((struct proc_event *)msg->data)->cpu = smp_processor_id();
+
+ /*
+ * Preemption remains disabled during send to ensure the messages are
+ * ordered according to their sequence numbers.
+ *
+ * If cn_netlink_send() fails, the data is not sent.
+ */
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
+
preempt_enable();
}
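A hedged userspace analogy for the invariant above, with a mutex standing in for preempt_disable() and printf standing in for cn_netlink_send(): the sequence number is taken and the message sent under the same exclusion, so messages leave in sequence order.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int seq;

static void send_msg_sketch(const char *payload)
{
	pthread_mutex_lock(&lock);   /* stands in for preempt_disable() */
	unsigned int s = seq++;
	printf("seq %u: %s\n", s, payload); /* "send" under the lock */
	pthread_mutex_unlock(&lock); /* stands in for preempt_enable() */
}

int main(void)
{
	send_msg_sketch("fork");
	send_msg_sketch("exec");
	return 0;
}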
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_FORK;
rcu_read_lock();
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- /* If cn_netlink_send() failed, the data is not sent */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_exec_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_EXEC;
ev->event_data.exec.process_pid = task->pid;
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_id_connector(struct task_struct *task, int which_id)
return;
}
rcu_read_unlock();
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_sid_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_SID;
ev->event_data.sid.process_pid = task->pid;
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_PTRACE;
ev->event_data.ptrace.process_pid = task->pid;
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_comm_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_COMM;
ev->event_data.comm.process_pid = task->pid;
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_coredump_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_COREDUMP;
ev->event_data.coredump.process_pid = task->pid;
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_exit_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_EXIT;
ev->event_data.exit.process_pid = task->pid;
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
/*
msg->ack = rcvd_ack + 1;
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
/**
static int __init cpufreq_dt_platdev_init(void)
{
struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
if (!np)
return -ENODEV;
- if (!of_match_node(machines, np))
+ match = of_match_node(machines, np);
+ of_node_put(np);
+ if (!match)
return -ENODEV;
- of_node_put(of_root);
-
return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1,
NULL, 0));
}
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
return cpufreq_driver->fast_switch(policy, target_freq);
}
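The bug fixed above is easy to reproduce: a clamp helper returns the clamped value and has no side effect on its argument, so the result must be assigned. A userspace sketch with a simplified stand-in for the kernel's clamp_val():

#include <stdio.h>

/* Simplified stand-in; the kernel macro is typeof-based. */
#define clamp_val(v, lo, hi) \
	((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

int main(void)
{
	unsigned int target = 5000, min = 1000, max = 3000;

	clamp_val(target, min, max);          /* no effect: result discarded */
	printf("discarded: %u\n", target);    /* still 5000 */

	target = clamp_val(target, min, max); /* the actual fix */
	printf("assigned:  %u\n", target);    /* 3000 */
	return 0;
}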
* -> ask driver for current freq and notify governors about a change
*/
if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
+ if (cpufreq_suspended) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
new_policy.cur = cpufreq_update_current_freq(policy);
if (WARN_ON(!new_policy.cur)) {
ret = -EIO;
return acpi_ppc;
}
-/*
- * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
- * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
- * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
- * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
- * target ratio 0x17. The _PSS control value stores in a format which can be
- * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
- * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
- * This function converts the _PSS control value to intel pstate driver format
- * for comparison and assignment.
- */
-static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
-{
- return cpu->acpi_perf_data.states[index].control >> 8;
-}
-
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
- int turbo_pss_ctl;
int ret;
int i;
* max frequency, which will cause a reduced performance as
* this driver uses real max turbo frequency as the max
* frequency. So correct this frequency in _PSS table to
- * correct max turbo frequency based on the turbo ratio.
+ * correct max turbo frequency based on the turbo state.
* Also need to convert to MHz as _PSS freq is in MHz.
*/
- turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
- if (turbo_pss_ctl > cpu->pstate.max_pstate)
+ if (!limits->turbo_disabled)
cpu->acpi_perf_data.states[0].core_frequency =
policy->cpuinfo.max_freq / 1000;
cpu->valid_pss_table = true;
- pr_info("_PPC limits will be enforced\n");
+ pr_debug("_PPC limits will be enforced\n");
return;
{
struct cpudata *cpu = all_cpu_data[cpu_num];
+ if (cpu->update_util_set)
+ return;
+
/* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0;
cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- intel_pstate_clear_update_util_hook(policy->cpu);
+ pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
+ policy->cpuinfo.max_freq, policy->max);
cpu = all_cpu_data[0];
if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
limits->max_sysfs_pct);
limits->max_perf_pct = max(limits->min_policy_pct,
limits->max_perf_pct);
- limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
/* Make sure min_perf_pct <= max_perf_pct */
limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
limits->min_perf = div_fp(limits->min_perf_pct, 100);
limits->max_perf = div_fp(limits->max_perf_pct, 100);
+ limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
out:
intel_pstate_set_update_util_hook(policy->cpu);
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
- policy->cpuinfo.max_freq =
- cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ update_turbo_state();
+ policy->cpuinfo.max_freq = limits->turbo_disabled ?
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+ policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+
intel_pstate_init_acpi_perf_limits(policy);
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
cpumask_set_cpu(policy->cpu, policy->cpus);
doorbell.space_id = reg_resource->space_id;
doorbell.bit_width = reg_resource->bit_width;
doorbell.bit_offset = reg_resource->bit_offset;
- doorbell.access_width = 64;
+ doorbell.access_width = 4;
doorbell.address = reg_resource->address;
pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
struct cpuidle_state *target_state = &drv->states[index];
bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
- u64 time_start, time_end;
+ ktime_t time_start, time_end;
s64 diff;
/*
sched_idle_set_state(target_state);
trace_cpu_idle_rcuidle(index, dev->cpu);
- time_start = local_clock();
+ time_start = ns_to_ktime(local_clock());
stop_critical_timings();
entered_state = target_state->enter(dev, drv, index);
start_critical_timings();
- time_end = local_clock();
+ time_end = ns_to_ktime(local_clock());
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
/* The cpu is no longer idle or about to enter idle. */
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
- /*
- * local_clock() returns the time in nanosecond, let's shift
- * by 10 (divide by 1024) to have microsecond based time.
- */
- diff = (time_end - time_start) >> 10;
+ diff = ktime_us_delta(time_end, time_start);
if (diff > INT_MAX)
diff = INT_MAX;
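The removed shift-by-10 divided nanoseconds by 1024 rather than 1000, understating the measured idle time by roughly 2.4 percent, which is why the code now uses ktime_us_delta(). A quick userspace sketch of the error:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t ns = 1000000; /* 1 ms of idle time */

	int64_t approx_us = ns >> 10;   /* 976 us: divide by 1024 */
	int64_t exact_us  = ns / 1000;  /* 1000 us */

	printf("approx=%lld exact=%lld\n",
	       (long long)approx_us, (long long)exact_us);
	return 0;
}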
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
unsigned int unit;
+ u32 unit_size;
int ret;
if (!ctx->u.aes.key_len)
if (!req->info)
return -EINVAL;
- for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
- if (!(req->nbytes & (unit_size_map[unit].size - 1)))
- break;
+ unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
+ if (req->nbytes <= unit_size_map[0].size) {
+ for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
+ if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
+ unit_size = unit_size_map[unit].value;
+ break;
+ }
+ }
+ }
- if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+ if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
(ctx->u.aes.key_len != AES_KEYSIZE_128)) {
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
: CCP_AES_ACTION_DECRYPT;
- rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+ rctx->cmd.u.xts.unit_size = unit_size;
rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
rctx->cmd.u.xts.iv = &rctx->iv_sg;
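The unit-size scan above relies on the classic power-of-two test: len is a whole number of size-byte units iff (len & (size - 1)) == 0. A quick userspace sketch with made-up sizes:

#include <stdio.h>

int main(void)
{
	unsigned int sizes[] = { 16, 512, 4096 };
	unsigned int len = 4096 + 512;
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("len %u %s a multiple of %u\n", len,
		       (len & (sizes[i] - 1)) ? "is not" : "is", sizes[i]);
	return 0;
}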
&dd->pdata->algs_info[i].algs_list[j]);
err_pm:
pm_runtime_disable(dev);
- if (dd->polling_mode)
+ if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
data_err:
dev_err(dev, "initialization failed.\n");
$(obj)/qat_rsapubkey-asn1.h
$(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
$(obj)/qat_rsaprivkey-asn1.h
+$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h
clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
&device_data->state);
memmove(req_ctx->state.buffer,
device_data->state.buffer,
- HASH_BLOCK_SIZE / sizeof(u32));
+ HASH_BLOCK_SIZE);
if (ret) {
dev_err(device_data->dev,
"%s: hash_resume_state() failed!\n",
memmove(device_data->state.buffer,
req_ctx->state.buffer,
- HASH_BLOCK_SIZE / sizeof(u32));
+ HASH_BLOCK_SIZE);
if (ret) {
dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
__func__);
.cra_name = "cbc(aes)",
.cra_driver_name = "p8_aes_cbc",
.cra_module = THIS_MODULE,
- .cra_priority = 1000,
+ .cra_priority = 2000,
.cra_type = &crypto_blkcipher_type,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
.cra_alignmask = 0,
.cra_name = "ctr(aes)",
.cra_driver_name = "p8_aes_ctr",
.cra_module = THIS_MODULE,
- .cra_priority = 1000,
+ .cra_priority = 2000,
.cra_type = &crypto_blkcipher_type,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
.cra_alignmask = 0,
# Some ABIs specify vrsave, special-purpose register #256, as reserved
# for system use.
-my $no_vrsave = ($flavour =~ /aix|linux64le/);
+my $no_vrsave = ($flavour =~ /linux-ppc64le/);
my $mtspr = sub {
my ($f,$idx,$ra) = @_;
if ($idx == 256 && $no_vrsave) {
devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
- if (err)
+ if (err) {
+ freqs.new = cur_freq;
+ devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
return err;
+ }
freqs.new = freq;
devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
devfreq->profile = profile;
strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
devfreq->previous_freq = profile->initial_freq;
+ devfreq->last_status.current_frequency = profile->initial_freq;
devfreq->data = data;
devfreq->nb.notifier_call = devfreq_notifier_call;
mutex_lock(&devfreq->lock);
}
- devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
- devfreq->profile->max_state *
- devfreq->profile->max_state,
- GFP_KERNEL);
- devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
- devfreq->profile->max_state,
- GFP_KERNEL);
- devfreq->last_stat_updated = jiffies;
-
dev_set_name(&devfreq->dev, "%s", dev_name(dev));
err = device_register(&devfreq->dev);
if (err) {
- put_device(&devfreq->dev);
mutex_unlock(&devfreq->lock);
goto err_out;
}
+ devfreq->trans_table = devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
+ devfreq->profile->max_state *
+ devfreq->profile->max_state,
+ GFP_KERNEL);
+ devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) *
+ devfreq->profile->max_state,
+ GFP_KERNEL);
+ devfreq->last_stat_updated = jiffies;
+
srcu_init_notifier_head(&devfreq->transition_notifier_list);
mutex_unlock(&devfreq->lock);
err_init:
list_del(&devfreq->node);
device_unregister(&devfreq->dev);
- kfree(devfreq);
err_out:
return ERR_PTR(err);
}
return -EINVAL;
device_unregister(&devfreq->dev);
- put_device(&devfreq->dev);
return 0;
}
/* Maps the memory mapped IO to control nocp register */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
-
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
+#include <linux/mm.h>
#include <uapi/linux/dma-buf.h>
dmabuf = file->private_data;
/* check for overflowing the buffer's size */
- if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ if (vma->vm_pgoff + vma_pages(vma) >
dmabuf->size >> PAGE_SHIFT)
return -EINVAL;
return -EINVAL;
/* check for offset overflow */
- if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+ if (pgoff + vma_pages(vma) < pgoff)
return -EOVERFLOW;
/* check for overflowing the buffer's size */
- if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ if (pgoff + vma_pages(vma) >
dmabuf->size >> PAGE_SHIFT)
return -EINVAL;
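The offset-overflow check above uses the standard unsigned-wraparound idiom: if pgoff + pages wrapped, the sum is smaller than pgoff. A minimal userspace sketch:

#include <stdio.h>

int main(void)
{
	unsigned long pgoff = ~0UL - 1; /* pathological page offset */
	unsigned long pages = 4;

	if (pgoff + pages < pgoff)
		printf("overflow detected: reject the mapping\n");
	return 0;
}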
#include <linux/reservation.h>
#include <linux/export.h>
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer. A reservation object
+ * can have one exclusive fence attached (normally associated with
+ * write operations) or N shared fences (read operations). The RCU
+ * mechanism is used to protect read access to fences from locked
+ * write-side updates.
+ */
+
DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
-/*
- * Reserve space to add a shared fence to a reservation_object,
- * must be called with obj->lock held.
+
+/**
+ * reservation_object_reserve_shared - Reserve space to add a shared
+ * fence to a reservation_object.
+ * @obj: reservation object
+ *
+ * Should be called before reservation_object_add_shared_fence(). Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
*/
int reservation_object_reserve_shared(struct reservation_object *obj)
{
fence_put(old_fence);
}
-/*
+/**
+ * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
* Add a fence to a shared slot, obj->lock must be held, and
* reservation_object_reserve_shared() has been called.
*/
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
+/**
+ * reservation_object_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the exclusive fence to add
+ *
+ * Add a fence to the exclusive slot. The obj->lock must be held.
+ */
void reservation_object_add_excl_fence(struct reservation_object *obj,
struct fence *fence)
{
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
+/**
+ * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * fences without update side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+ * RETURNS
+ * Zero or -errno
+ */
int reservation_object_get_fences_rcu(struct reservation_object *obj,
struct fence **pfence_excl,
unsigned *pshared_count,
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
+/**
+ * reservation_object_wait_timeout_rcu - Wait on reservation's objects
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zero on success.
+ */
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout)
return ret;
}
+/**
+ * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object
+ * @test_all: if true, test all fences, otherwise only test the exclusive
+ * fence
+ *
+ * RETURNS
+ * true if all fences signaled, else false
+ */
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
bool test_all)
{
u32 mbr_dus; /* Destination Microblock Stride Register */
};
-
+/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
struct at_xdmac_lld lld;
enum dma_transfer_direction direction;
unsigned int xfer_size;
struct list_head descs_list;
struct list_head xfer_node;
-};
+} __aligned(sizeof(u64));
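A short userspace sketch of what the alignment annotation above buys: forcing the struct to 8-byte alignment means a 64-bit quantity inside it sits on a natural boundary, which is what lets the driver treat hardware updates of NDA/CUBC as atomic. GCC-style attribute standing in for the kernel's __aligned():

#include <stdio.h>
#include <stdint.h>
#include <stdalign.h>

struct desc {
	uint32_t nda;
	uint32_t ubc;
} __attribute__((aligned(sizeof(uint64_t))));

int main(void)
{
	printf("alignof(struct desc) = %zu\n", alignof(struct desc)); /* 8 */
	return 0;
}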
static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
u32 cur_nda, check_nda, cur_ubc, mask, value;
u8 dwidth = 0;
unsigned long flags;
+ bool initd;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_COMPLETE)
residue = desc->xfer_size;
/*
* Flush FIFO: only relevant when the transfer is source peripheral
- * synchronized.
+ * synchronized. Flush is needed before reading CUBC because data in
+ * the FIFO are not reported by CUBC. Reporting a residue of the
+ * transfer length while there is still data in the FIFO can cause
+ * issues. Use case: the Atmel USART has a timeout which fires when
+ * characters have been received but no new character arrives for a
+ * while. On timeout, it requests the residue. If the data are still
+ * in the DMA FIFO, we would report a residue equal to the transfer
+ * length, i.e. that no data were received. An application waiting
+ * for these data would then hang, since there will be no further
+ * USART timeout without new incoming data.
*/
mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
}
/*
- * When processing the residue, we need to read two registers but we
- * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
- * we stand in the descriptor list and AT_XDMAC_CUBC is used
- * to know how many data are remaining for the current descriptor.
- * Since the dma channel is not paused to not loose data, between the
- * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
- * descriptor.
- * For that reason, after reading AT_XDMAC_CUBC, we check if we are
- * still using the same descriptor by reading a second time
- * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
- * read again AT_XDMAC_CUBC.
+ * The easiest way to compute the residue should be to pause the DMA
+ * but doing this can lead to miss some data as some devices don't
+ * have FIFO.
+ * We need to read several registers because:
+ * - the DMA is running, therefore a descriptor change is possible while
+ * reading these registers
+ * - When the block transfer is done, the value of the CUBC register
+ * is set to its initial value until the fetch of the next descriptor.
+ * This value will corrupt the residue calculation so we have to skip
+ * it.
+ *
+ * INITD -------- ------------
+ * |____________________|
+ * _______________________ _______________
+ * NDA @desc2 \/ @desc3
+ * _______________________/\_______________
+ * __________ ___________ _______________
+ * CUBC 0 \/ MAX desc1 \/ MAX desc2
+ * __________/\___________/\_______________
+ *
+ * Since descriptors are aligned on 64 bits, we can assume that
+ * the update of NDA and CUBC is atomic.
* Memory barriers are used to ensure the read order of the registers.
- * A max number of retries is set because unlikely it can never ends if
- * we are transferring a lot of data with small buffers.
+ * A maximum number of retries is set because, although unlikely,
+ * this loop could otherwise never end.
*/
- cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
- rmb();
- cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
- rmb();
check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-
- if (likely(cur_nda == check_nda))
- break;
-
- cur_nda = check_nda;
+ rmb();
+ initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
rmb();
cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ rmb();
+ cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ rmb();
+
+ if ((check_nda == cur_nda) && initd)
+ break;
}
if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
goto spin_unlock;
}
+ /*
+ * Flush FIFO: only relevant when the transfer is source peripheral
+ * synchronized. Another flush is needed here because CUBC is updated
+ * when the controller sends the data write command. This can lead to
+ * reporting data as transferred before they are actually written to
+ * memory or to the device. The FIFO flush ensures the data really
+ * have been written.
+ */
+ if ((desc->lld.mbr_cfg & mask) == value) {
+ at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+ while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+ cpu_relax();
+ }
+
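The retry loop above is a consistent-snapshot pattern: read the descriptor pointer before and after the payload read and retry until both reads agree, so the payload is known to belong to a single descriptor. A hedged userspace sketch (hypothetical register-read helpers, illustrative values only):

#include <stdio.h>

#define MAX_RETRIES 5

/* Hypothetical hardware-register reads for illustration only. */
static unsigned read_nda(void) { return 2; }
static unsigned read_ubc(void) { return 128; }

int main(void)
{
	unsigned check_nda = 0, cur_nda = 0, cur_ubc = 0;
	int retry;

	for (retry = 0; retry < MAX_RETRIES; retry++) {
		check_nda = read_nda();
		cur_ubc = read_ubc();   /* payload read between the two checks */
		cur_nda = read_nda();
		if (check_nda == cur_nda)
			break;          /* consistent snapshot */
	}

	if (retry < MAX_RETRIES)
		printf("desc %u has %u bytes remaining\n", cur_nda, cur_ubc);
	return 0;
}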
/*
* Remove size of all microblocks already transferred and the current
* one. Then add the remaining size to transfer of the current
goto free_resources;
}
- src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
- PAGE_SIZE, DMA_TO_DEVICE);
+ src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
+ (size_t)src & ~PAGE_MASK, PAGE_SIZE,
+ DMA_TO_DEVICE);
unmap->addr[0] = src_dma;
ret = dma_mapping_error(dma_chan->device->dev, src_dma);
}
unmap->to_cnt = 1;
- dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
+ (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
+ DMA_FROM_DEVICE);
unmap->addr[1] = dest_dma;
ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
- edac_mod_work(&mci->work, value);
+ if (mci->op_state == OP_RUNNING_POLL)
+ edac_mod_work(&mci->work, value);
}
mutex_unlock(&mem_ctls_mutex);
}
{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
};
-#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
-#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
+#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
+ GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
+
+#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
+ GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))
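The type-dependent macros above just select different bit ranges out of the same register. A userspace mask-and-shift sketch of the extraction (a stand-in for the driver's GET_BITFIELD(), equivalent for this example):

#include <stdio.h>
#include <stdint.h>

#define GET_BITFIELD(v, lo, hi) \
	(((v) & (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))) >> (lo))

int main(void)
{
	uint32_t reg = 0x00950000;

	printf("bits 16-19: 0x%llx\n", GET_BITFIELD(reg, 16, 19)); /* 0x5 */
	printf("bits 20-23: 0x%llx\n", GET_BITFIELD(reg, 20, 23)); /* 0x9 */
	return 0;
}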
/* Device 16, functions 2-7 */
struct pci_id_table {
const struct pci_id_descr *descr;
int n_devs;
+ enum type type;
};
struct sbridge_dev {
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
};
-#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+#define PCI_ID_TABLE_ENTRY(A, T) { \
+ .descr = A, \
+ .n_devs = ARRAY_SIZE(A), \
+ .type = T \
+}
+
static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
{0,} /* 0 terminated list. */
};
};
static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
{0,} /* 0 terminated list. */
};
};
static const struct pci_id_table pci_dev_descr_haswell_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
{0,} /* 0 terminated list. */
};
};
static const struct pci_id_table pci_dev_descr_knl_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_knl),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
{0,}
};
};
static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
{0,} /* 0 terminated list. */
};
pci_read_config_dword(pvt->pci_tad[i],
rir_offset[j][k],
®);
- tmp_mb = RIR_OFFSET(reg) << 6;
+ tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
i, j, k,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
- (u32)RIR_RNK_TGT(reg),
+ (u32)RIR_RNK_TGT(pvt->info.type, reg),
reg);
}
}
pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
rir_offset[n_rir][idx],
®);
- *rank = RIR_RNK_TGT(reg);
+ *rank = RIR_RNK_TGT(pvt->info.type, reg);
edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
n_rir,
* @num_mc: pointer to the memory controllers count, to be incremented in case
* of success.
* @table: model specific table
- * @allow_dups: allow for multiple devices to exist with the same device id
- * (as implemented, this isn't expected to work correctly in the
- * multi-socket case).
- * @multi_bus: don't assume devices on different buses belong to different
- * memory controllers.
*
* returns 0 on success, or an error code otherwise
*/
-static int sbridge_get_all_devices_full(u8 *num_mc,
- const struct pci_id_table *table,
- int allow_dups,
- int multi_bus)
+static int sbridge_get_all_devices(u8 *num_mc,
+ const struct pci_id_table *table)
{
int i, rc;
struct pci_dev *pdev = NULL;
+ int allow_dups = 0;
+ int multi_bus = 0;
+ if (table->type == KNIGHTS_LANDING)
+ allow_dups = multi_bus = 1;
while (table && table->descr) {
for (i = 0; i < table->n_devs; i++) {
if (!allow_dups || i == 0 ||
return 0;
}
-#define sbridge_get_all_devices(num_mc, table) \
- sbridge_get_all_devices_full(num_mc, table, 0, 0)
-#define sbridge_get_all_devices_knl(num_mc, table) \
- sbridge_get_all_devices_full(num_mc, table, 1, 1)
-
static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
struct sbridge_dev *sbridge_dev)
{
#define ICPU(model, table) \
{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
-/* Order here must match "enum type" */
static const struct x86_cpu_id sbridge_cpuids[] = {
ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */
ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */
ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */
ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */
+ ICPU(0x56, pci_dev_descr_broadwell_table), /* BROADWELL-DE */
ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */
{ }
};
mc, mc + 1, num_mc);
sbridge_dev->mc = mc++;
- rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids);
+ rc = sbridge_register_mci(sbridge_dev, ptable->type);
if (unlikely(rc < 0))
goto fail1;
}
palmas_enable_irq(palmas_usb);
/* perform initial detection */
+ if (palmas_usb->enable_gpio_vbus_detection)
+ palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
device_set_wakeup_capable(&pdev->dev, true);
return 0;
{
efi_memory_desc_t *md;
u64 paddr, npages, size;
+ int resv;
if (efi_enabled(EFI_DBG))
pr_info("Processing EFI memory map:\n");
paddr = md->phys_addr;
npages = md->num_pages;
+ resv = is_reserve_region(md);
if (efi_enabled(EFI_DBG)) {
char buf[64];
- pr_info(" 0x%012llx-0x%012llx %s",
+ pr_info(" 0x%012llx-0x%012llx %s%s\n",
paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
- efi_md_typeattr_format(buf, sizeof(buf), md));
+ efi_md_typeattr_format(buf, sizeof(buf), md),
+ resv ? "*" : "");
}
memrange_efi_to_native(&paddr, &npages);
if (is_normal_ram(md))
early_init_dt_add_memory_arch(paddr, size);
- if (is_reserve_region(md)) {
+ if (resv)
memblock_mark_nomap(paddr, size);
- if (efi_enabled(EFI_DBG))
- pr_cont("*");
- }
- if (efi_enabled(EFI_DBG))
- pr_cont("\n");
}
set_bit(EFI_MEMMAP, &efi.flags);
menuconfig GPIOLIB
bool "GPIO Support"
+ select ANON_INODES
help
This enables GPIO support through the generic GPIO library.
You only need to enable this, if you also want to enable
config OF_GPIO
def_bool y
- depends on OF || COMPILE_TEST
+ depends on OF
config GPIO_ACPI
def_bool y
select OF_GPIO
config GPIO_TEGRA
- bool
- default y
+ bool "NVIDIA Tegra GPIO support"
+ default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
+ depends on OF
+ help
+ Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
config GPIO_TS4800
tristate "TS-4800 DIO blocks and compatibles"
config GPIO_104_DIO_48E
tristate "ACCES 104-DIO-48E GPIO support"
- depends on ISA
+ depends on ISA_BUS_API
select GPIOLIB_IRQCHIP
help
Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E,
config GPIO_104_IDIO_16
tristate "ACCES 104-IDIO-16 GPIO support"
- depends on ISA
+ depends on ISA_BUS_API
select GPIOLIB_IRQCHIP
help
Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16,
config GPIO_104_IDI_48
tristate "ACCES 104-IDI-48 GPIO support"
- depends on ISA
+ depends on ISA_BUS_API
select GPIOLIB_IRQCHIP
help
Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
config GPIO_WS16C48
tristate "WinSystems WS16C48 GPIO support"
- depends on ISA
+ depends on ISA_BUS_API
select GPIOLIB_IRQCHIP
help
Enables GPIO support for the WinSystems WS16C48. The base port
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
const unsigned io_port = offset / 8;
- const unsigned control_port = io_port / 2;
+ const unsigned int control_port = io_port / 3;
const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
unsigned long flags;
unsigned control;
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
const unsigned io_port = offset / 8;
- const unsigned control_port = io_port / 2;
+ const unsigned int control_port = io_port / 3;
const unsigned mask = BIT(offset % 8);
const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port;
idi48gpio->irq = irq[id];
spin_lock_init(&idi48gpio->lock);
+ spin_lock_init(&idi48gpio->ack_lock);
dev_set_drvdata(dev, idi48gpio);
/* disable interrupts and clear status */
for (i = 0; i < kona_gpio->num_bank; i++) {
/* Unlock the entire bank first */
- bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE);
+ bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
/* Now re-lock the bank */
- bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE);
+ bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
}
}
#include <mach/hardware.h>
#include <mach/platform.h>
-#include <mach/irqs.h>
#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004)
static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
{
- return IRQ_LPC32XX_P0_P1_IRQ;
+ return -ENXIO;
}
-static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
- IRQ_LPC32XX_GPIO_00,
- IRQ_LPC32XX_GPIO_01,
- IRQ_LPC32XX_GPIO_02,
- IRQ_LPC32XX_GPIO_03,
- IRQ_LPC32XX_GPIO_04,
- IRQ_LPC32XX_GPIO_05,
-};
-
static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
{
- if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
- return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
return -ENXIO;
}
-static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
- IRQ_LPC32XX_GPI_00,
- IRQ_LPC32XX_GPI_01,
- IRQ_LPC32XX_GPI_02,
- IRQ_LPC32XX_GPI_03,
- IRQ_LPC32XX_GPI_04,
- IRQ_LPC32XX_GPI_05,
- IRQ_LPC32XX_GPI_06,
- IRQ_LPC32XX_GPI_07,
- IRQ_LPC32XX_GPI_08,
- IRQ_LPC32XX_GPI_09,
- -ENXIO, /* 10 */
- -ENXIO, /* 11 */
- -ENXIO, /* 12 */
- -ENXIO, /* 13 */
- -ENXIO, /* 14 */
- -ENXIO, /* 15 */
- -ENXIO, /* 16 */
- -ENXIO, /* 17 */
- -ENXIO, /* 18 */
- IRQ_LPC32XX_GPI_19,
- -ENXIO, /* 20 */
- -ENXIO, /* 21 */
- -ENXIO, /* 22 */
- -ENXIO, /* 23 */
- -ENXIO, /* 24 */
- -ENXIO, /* 25 */
- -ENXIO, /* 26 */
- -ENXIO, /* 27 */
- IRQ_LPC32XX_GPI_28,
-};
-
static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
{
- if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
- return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
return -ENXIO;
}
return gpio % 8;
}
-static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
+static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
{
- struct sch_gpio *sch = gpiochip_get_data(gc);
unsigned short offset, bit;
u8 reg_val;
return reg_val;
}
-static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
+static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
int val)
{
- struct sch_gpio *sch = gpiochip_get_data(gc);
unsigned short offset, bit;
u8 reg_val;
struct sch_gpio *sch = gpiochip_get_data(gc);
spin_lock(&sch->lock);
- sch_gpio_reg_set(gc, gpio_num, GIO, 1);
+ sch_gpio_reg_set(sch, gpio_num, GIO, 1);
spin_unlock(&sch->lock);
return 0;
}
static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
{
- return sch_gpio_reg_get(gc, gpio_num, GLV);
+ struct sch_gpio *sch = gpiochip_get_data(gc);
+ return sch_gpio_reg_get(sch, gpio_num, GLV);
}
static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
struct sch_gpio *sch = gpiochip_get_data(gc);
spin_lock(&sch->lock);
- sch_gpio_reg_set(gc, gpio_num, GLV, val);
+ sch_gpio_reg_set(sch, gpio_num, GLV, val);
spin_unlock(&sch->lock);
}
struct sch_gpio *sch = gpiochip_get_data(gc);
spin_lock(&sch->lock);
- sch_gpio_reg_set(gc, gpio_num, GIO, 0);
+ sch_gpio_reg_set(sch, gpio_num, GIO, 0);
spin_unlock(&sch->lock);
/*
* GPIO7 is configured by the CMC as SLPIOVR
* Enable GPIO[9:8] core powered gpios explicitly
*/
- sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
- sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
+ sch_gpio_reg_set(sch, 8, GEN, 1);
+ sch_gpio_reg_set(sch, 9, GEN, 1);
/*
* SUS_GPIO[2:0] enabled by default
* Enable SUS_GPIO3 resume powered gpio explicitly
*/
- sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
+ sch_gpio_reg_set(sch, 13, GEN, 1);
break;
case PCI_DEVICE_ID_INTEL_ITC_LPC:
const struct tegra_gpio_soc_config *soc;
struct gpio_chip gc;
struct irq_chip ic;
- struct lock_class_key lock_class;
u32 bank_count;
};
SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
};
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different category
+ * than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
static int tegra_gpio_probe(struct platform_device *pdev)
{
const struct tegra_gpio_soc_config *config;
bank = &tgi->bank_info[GPIO_BANK(gpio)];
- irq_set_lockdep_class(irq, &tgi->lock_class);
+ irq_set_lockdep_class(irq, &gpio_lock_class);
irq_set_chip_data(irq, bank);
irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
}
dev_err(&pdev->dev, "input clock not found.\n");
return PTR_ERR(gpio->clk);
}
+ ret = clk_prepare_enable(gpio->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+ pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
pm_runtime_put(&pdev->dev);
err_pm_dis:
pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(gpio->clk);
return ret;
}
if (!desc && gpio_is_valid(gpio))
return -EPROBE_DEFER;
+ err = gpiod_request(desc, label);
+ if (err)
+ return err;
+
if (flags & GPIOF_OPEN_DRAIN)
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
if (flags & GPIOF_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- err = gpiod_request(desc, label);
- if (err)
- return err;
-
if (flags & GPIOF_DIR_IN)
err = gpiod_direction_input(desc);
else
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/io-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
+#include <linux/compat.h>
#include <uapi/linux/gpio.h>
#include "gpiolib.h"
{
struct gpio_device *gdev = filp->private_data;
struct gpio_chip *chip = gdev->chip;
- int __user *ip = (int __user *)arg;
+ void __user *ip = (void __user *)arg;
/* We fail any subsequent ioctl():s when the chip is gone */
if (!chip)
return -EINVAL;
}
+#ifdef CONFIG_COMPAT
+static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
/**
* gpio_chrdev_open() - open the chardev for ioctl operations
* @inode: inode for this chardev
.owner = THIS_MODULE,
.llseek = noop_llseek,
.unlocked_ioctl = gpio_ioctl,
- .compat_ioctl = gpio_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = gpio_ioctl_compat,
+#endif
};
static void gpiodevice_release(struct device *dev)
{
struct gpio_device *gdev = dev_get_drvdata(dev);
- cdev_del(&gdev->chrdev);
list_del(&gdev->list);
ida_simple_remove(&gpio_ida, gdev->id);
kfree(gdev->label);
/* From this point, the .release() function cleans up gpio_device */
gdev->dev.release = gpiodevice_release;
- get_device(&gdev->dev);
pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
__func__, gdev->base, gdev->base + gdev->ngpio - 1,
dev_name(&gdev->dev), gdev->chip->label ? : "generic");
goto err_free_label;
}
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
for (i = 0; i < chip->ngpio; i++) {
struct gpio_desc *desc = &gdev->descs[i];
}
}
- spin_unlock_irqrestore(&gpio_lock, flags);
-
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
* be removed, else it will be dangling until the last user is
* gone.
*/
+ cdev_del(&gdev->chrdev);
+ device_del(&gdev->dev);
put_device(&gdev->dev);
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
spin_lock_irqsave(&gpio_lock, flags);
list_for_each_entry(gdev, &gpio_devices, list)
- if (match(gdev->chip, data))
+ if (gdev->chip && match(gdev->chip, data))
break;
/* No match? */
spin_lock_irqsave(&gpio_lock, flags);
}
done:
- if (status < 0) {
- /* Clear flags that might have been set by the caller before
- * requesting the GPIO.
- */
- clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
- clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
- clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
- }
spin_unlock_irqrestore(&gpio_lock, flags);
return status;
}
/*
* This descriptor validation needs to be inserted verbatim into each
* function taking a descriptor, so we need to use a preprocessor
- * macro to avoid endless duplication.
+ * macro to avoid endless duplication. If the desc is NULL it is an
+ * optional GPIO and calls should just bail out.
*/
#define VALIDATE_DESC(desc) do { \
- if (!desc || !desc->gdev) { \
- pr_warn("%s: invalid GPIO\n", __func__); \
+ if (!desc) \
+ return 0; \
+ if (IS_ERR(desc)) { \
+ pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
+ return PTR_ERR(desc); \
+ } \
+ if (!desc->gdev) { \
+ pr_warn("%s: invalid GPIO (no device)\n", __func__); \
return -EINVAL; \
} \
if (!desc->gdev->chip) { \
} } while (0)
#define VALIDATE_DESC_VOID(desc) do { \
- if (!desc || !desc->gdev) { \
- pr_warn("%s: invalid GPIO\n", __func__); \
+ if (!desc) \
+ return; \
+ if (IS_ERR(desc)) { \
+ pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
+ return; \
+ } \
+ if (!desc->gdev) { \
+ pr_warn("%s: invalid GPIO (no device)\n", __func__); \
return; \
} \
if (!desc->gdev->chip) { \
struct gpio_chip *chip;
int offset;
- VALIDATE_DESC(desc);
+ /*
+ * Cannot VALIDATE_DESC() here: gpiod_to_irq() consumer semantics
+ * require this function to return a negative error number, not
+ * zero, on an invalid descriptor.
+ */
+ if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip)
+ return -EINVAL;
+
chip = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
if (chip->to_irq) {
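A consumer-side sketch of what the NULL-tolerant VALIDATE_DESC() enables; the probe function is invented, the gpiod_* calls are the real API:

static int example_probe(struct device *dev)
{
	struct gpio_desc *reset;

	/* NULL (not an error) when the optional line is not described */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* With the change above this is a silent no-op on NULL... */
	gpiod_set_value(reset, 1);

	/* ...while gpiod_to_irq(NULL) still fails with -EINVAL. */
	return 0;
}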
*/
int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
- if (offset >= chip->ngpio)
- return -EINVAL;
+ struct gpio_desc *desc;
- if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) {
+ desc = gpiochip_get_desc(chip, offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ /* Flush direction if something changed behind our back */
+ if (chip->get_direction) {
+ int dir = chip->get_direction(chip, offset);
+
+ if (dir)
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ else
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ }
+
+ if (test_bit(FLAG_IS_OUT, &desc->flags)) {
chip_err(chip,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
return -EIO;
}
- set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+ set_bit(FLAG_USED_AS_IRQ, &desc->flags);
return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
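The direction flush assumes the usual driver convention that .get_direction() returns nonzero for input and zero for output. A sketch for a hypothetical memory-mapped controller (register layout invented):

static int example_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct example_gpio *g = gpiochip_get_data(chip);

	/* DIR bit set means the line is an input: report nonzero */
	return !!(readl(g->base + EXAMPLE_DIR_REG) & BIT(offset));
}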
}
EXPORT_SYMBOL_GPL(gpiod_get_optional);
-/**
- * gpiod_parse_flags - helper function to parse GPIO lookup flags
- * @desc: gpio to be setup
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_get_gpio_hog()
- *
- * Set the GPIO descriptor flags based on the given GPIO lookup flags.
- */
-static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
-{
- if (lflags & GPIO_ACTIVE_LOW)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- if (lflags & GPIO_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
- if (lflags & GPIO_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
-}
/**
* gpiod_configure_flags - helper function to configure a given GPIO
* @desc: gpio whose value will be assigned
* @con_id: function within the GPIO consumer
+ * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
+ * of_get_gpio_hog()
* @dflags: gpiod_flags - optional GPIO initialization flags
*
* Return 0 on success, -ENOENT if no GPIO has been assigned to the
* occurred while trying to acquire the GPIO.
*/
static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
- enum gpiod_flags dflags)
+ unsigned long lflags, enum gpiod_flags dflags)
{
int status;
+ if (lflags & GPIO_ACTIVE_LOW)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ if (lflags & GPIO_OPEN_DRAIN)
+ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ if (lflags & GPIO_OPEN_SOURCE)
+ set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
pr_debug("no flags found for %s\n", con_id);
return desc;
}
- gpiod_parse_flags(desc, lookupflags);
-
status = gpiod_request(desc, con_id);
if (status < 0)
return ERR_PTR(status);
- status = gpiod_configure_flags(desc, con_id, flags);
+ status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
if (status < 0) {
dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
gpiod_put(desc);
if (IS_ERR(desc))
return desc;
+ ret = gpiod_request(desc, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
if (active_low)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
}
- ret = gpiod_request(desc, NULL);
- if (ret)
- return ERR_PTR(ret);
-
return desc;
}
EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
chip = gpiod_to_chip(desc);
hwnum = gpio_chip_hwgpio(desc);
- gpiod_parse_flags(desc, lflags);
-
local_desc = gpiochip_request_own_desc(chip, hwnum, name);
if (IS_ERR(local_desc)) {
status = PTR_ERR(local_desc);
return status;
}
- status = gpiod_configure_flags(desc, name, dflags);
+ status = gpiod_configure_flags(desc, name, lflags, dflags);
if (status < 0) {
pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n",
name, chip->label, hwnum, status);
/* MM block clocks */
int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
+ /* query virtual capabilities */
+ u32 (*get_virtual_caps)(struct amdgpu_device *adev);
};
/*
/* GPU virtualization */
+#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0)
+#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1)
struct amdgpu_virtualization {
bool supports_sr_iov;
+ bool is_virtual;
+ u32 caps;
};
/*
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
+#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
return result;
}
+static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
+{
+ CGS_FUNC_ADEV;
+ if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
+ release_firmware(adev->pm.fw);
+ return 0;
+ }
+ /* cannot release other firmware images because they are not created by cgs */
+ return -EINVAL;
+}
+
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
struct cgs_acpi_method_argument *argument = NULL;
uint32_t i, count;
acpi_status status;
- int result;
+ int result = 0;
uint32_t func_no = 0xFFFFFFFF;
handle = ACPI_HANDLE(&adev->pdev->dev);
amdgpu_cgs_pm_query_clock_limits,
amdgpu_cgs_set_camera_voltages,
amdgpu_cgs_get_firmware_info,
+ amdgpu_cgs_rel_firmware,
amdgpu_cgs_set_powergating_state,
amdgpu_cgs_set_clockgating_state,
amdgpu_cgs_get_active_displays_info,
*/
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
- if (adev->mode_info.atom_context)
+ if (adev->mode_info.atom_context) {
kfree(adev->mode_info.atom_context->scratch);
+ kfree(adev->mode_info.atom_context->iio);
+ }
kfree(adev->mode_info.atom_context);
adev->mode_info.atom_context = NULL;
kfree(adev->mode_info.atom_card_info);
adev->ip_block_status[i].valid = false;
}
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (adev->ip_blocks[i].funcs->late_fini)
+ adev->ip_blocks[i].funcs->late_fini((void *)adev);
+ }
+
return 0;
}
return 0;
}
+static bool amdgpu_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+ return false;
+#endif
+}
+
/**
* amdgpu_device_init - initialize the driver
*
adev->virtualization.supports_sr_iov =
amdgpu_atombios_has_gpu_virtualization_table(adev);
+ /* Check if we are executing in a virtualized environment */
+ adev->virtualization.is_virtual = amdgpu_device_is_virtual();
+ adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+
/* Post card if necessary */
if (!amdgpu_card_posted(adev) ||
- adev->virtualization.supports_sr_iov) {
+ (adev->virtualization.is_virtual &&
+ !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
if (!adev->bios) {
dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
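Restating the posting condition above as a predicate, for readability (the helper name is invented; the flag and the posted check are from this series):

static bool example_needs_post(struct amdgpu_device *adev)
{
	if (!amdgpu_card_posted(adev))
		return true;

	/* Passthrough VM without SR-IOV: the VBIOS never ran, re-post */
	return adev->virtualization.is_virtual &&
	       !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN);
}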
dev_info.max_memory_clock = adev->pm.default_mclk * 10;
}
dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
- dev_info.num_rb_pipes = adev->gfx.config.num_rbs;
+ dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines;
dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
dev_info._pad = 0;
dev_info.ids_flags = 0;
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type state = 0;
- long idx;
+ unsigned long idx;
int ret;
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
- else {
- ret = kstrtol(buf, 0, &idx);
+ else if (adev->pp_enabled) {
+ struct pp_states_info data;
- if (ret) {
+ ret = kstrtoul(buf, 0, &idx);
+ if (ret || idx >= ARRAY_SIZE(data.states)) {
count = -EINVAL;
goto fail;
}
- if (adev->pp_enabled) {
- struct pp_states_info data;
- amdgpu_dpm_get_pp_num_states(adev, &data);
- state = data.states[idx];
- /* only set user selected power states */
- if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
- state != POWER_STATE_TYPE_DEFAULT) {
- amdgpu_dpm_dispatch_task(adev,
- AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
- adev->pp_force_state_enabled = true;
- }
+ amdgpu_dpm_get_pp_num_states(adev, &data);
+ state = data.states[idx];
+ /* only set user selected power states */
+ if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
+ state != POWER_STATE_TYPE_DEFAULT) {
+ amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+ adev->pp_force_state_enabled = true;
}
}
fail:
if (ret)
return ret;
-#ifdef CONFIG_DRM_AMD_POWERPLAY
- if (adev->pp_enabled) {
- amdgpu_pm_sysfs_fini(adev);
- amd_powerplay_fini(adev->powerplay.pp_handle);
- }
-#endif
-
return ret;
}
return ret;
}
+static void amdgpu_pp_late_fini(void *handle)
+{
+#ifdef CONFIG_DRM_AMD_POWERPLAY
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->pp_enabled) {
+ amdgpu_pm_sysfs_fini(adev);
+ amd_powerplay_fini(adev->powerplay.pp_handle);
+ }
+
+ if (adev->powerplay.ip_funcs->late_fini)
+ adev->powerplay.ip_funcs->late_fini(
+ adev->powerplay.pp_handle);
+#endif
+}
+
static int amdgpu_pp_suspend(void *handle)
{
int ret = 0;
.sw_fini = amdgpu_pp_sw_fini,
.hw_init = amdgpu_pp_hw_init,
.hw_fini = amdgpu_pp_hw_fini,
+ .late_fini = amdgpu_pp_late_fini,
.suspend = amdgpu_pp_suspend,
.resume = amdgpu_pp_resume,
.is_idle = amdgpu_pp_is_idle,
ring->ring = NULL;
ring->ring_obj = NULL;
+ amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_wb_free(ring->adev, ring->rptr_offs);
amdgpu_wb_free(ring->adev, ring->wptr_offs);
return r;
}
r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+ memset(sa_manager->cpu_ptr, 0, sa_manager->size);
amdgpu_bo_unreserve(sa_manager->bo);
return r;
}
{
int r;
- if (adev->uvd.vcpu_bo == NULL)
- return 0;
+ kfree(adev->uvd.saved_bo);
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
- r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
- if (!r) {
- amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
- amdgpu_bo_unpin(adev->uvd.vcpu_bo);
- amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
- }
+ if (adev->uvd.vcpu_bo) {
+ r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
+ if (!r) {
+ amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
+ amdgpu_bo_unpin(adev->uvd.vcpu_bo);
+ amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+ }
- amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+ amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+ }
amdgpu_ring_fini(&adev->uvd.ring);
if (fences == 0 && handles == 0) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
+ /* workaround: the UVD clock remains high even when
+ * UVD DPM is disabled on Polaris10 */
+ if (adev->asic_type == CHIP_POLARIS10)
+ amdgpu_asic_set_uvd_clocks(adev, 0, 0);
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
}
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device *adev, u8 slave_addr, u8 line_number, u8 offset, u8 data)
+{
+ PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+
+ args.ucRegIndex = offset;
+ args.lpI2CDataOut = data;
+ args.ucFlag = 1;
+ args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+ args.ucTransBytes = 1;
+ args.ucSlaveAddr = slave_addr;
+ args.ucLineNumber = line_number;
+
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num);
u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap);
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device *adev,
+ u8 slave_addr, u8 line_number, u8 offset, u8 data);
#endif
ci_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
return true;
}
+static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
+{
+ /* CIK does not support SR-IOV */
+ return 0;
+}
+
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS, false},
{mmGB_ADDR_CONFIG, false},
.get_xclk = &cik_get_xclk,
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
+ .get_virtual_caps = &cik_get_virtual_caps,
/* these should be moved to their own ip modules */
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
+static void cik_sdma_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/*
* sDMA - System DMA
* Starting with CIK, the GPU has new asynchronous
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+
+ cik_sdma_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
if (r)
return r;
- /* unhalt the MEs */
- cik_sdma_enable(adev, true);
+ /* halt the engine before programming */
+ cik_sdma_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = cik_sdma_gfx_resume(adev);
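The same halt-before-program reordering recurs for SDMA v2.4 and v3.0 below; in outline:

/*
 * Post-series start sequence (CIK shown; SDMA v2.4 and v3.0 mirror it):
 *
 *   cik_sdma_enable(adev, false);   halt the engines first
 *   cik_sdma_gfx_resume(adev);      program the RB/IB registers while
 *                                   halted, unhalt, then ring-test
 *
 * Previously the engines were unhalted before the ring registers were
 * programmed, so an engine could fetch from a half-configured ring.
 */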
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ cik_sdma_free_microcode(adev);
return 0;
}
static int fiji_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
return err;
}
+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+}
+
/**
* gfx_v7_0_tiling_mode_table_init - init the hw tiling table
*
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
+ gfx_v7_0_free_microcode(adev);
return 0;
}
case 2:
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- if ((ring->me == me_id) & (ring->pipe == pipe_id))
+ if ((ring->me == me_id) && (ring->pipe == pipe_id))
amdgpu_fence_process(ring);
}
break;
#include "vid.h"
#include "amdgpu_ucode.h"
#include "amdgpu_atombios.h"
+#include "atombios_i2c.h"
#include "clearstate_vi.h"
#include "gmc/gmc_8_2_d.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
+#include "smu/smu_7_1_3_d.h"
+
#define GFX8_NUM_GFX_RINGS 1
#define GFX8_NUM_COMPUTE_RINGS 8
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
+ mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
static const u32 polaris11_golden_common_all[] =
static const u32 golden_settings_polaris10_a11[] =
{
mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
- mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+ mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+ mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
static const u32 polaris10_golden_common_all[] =
amdgpu_program_register_sequence(adev,
polaris10_golden_common_all,
(const u32)ARRAY_SIZE(polaris10_golden_common_all));
+ WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
+ if (adev->pdev->revision == 0xc7) {
+ amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
+ amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
+ }
break;
case CHIP_CARRIZO:
amdgpu_program_register_sequence(adev,
return r;
}
+
+static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
+{
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ if ((adev->asic_type != CHIP_STONEY) &&
+ (adev->asic_type != CHIP_TOPAZ))
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+
+ kfree(adev->gfx.rlc.register_list_format);
+}
+
static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
gfx_v8_0_rlc_fini(adev);
- kfree(adev->gfx.rlc.register_list_format);
+ gfx_v8_0_free_microcode(adev);
return 0;
}
amdgpu_ring_write(ring, 0x3a00161a);
amdgpu_ring_write(ring, 0x0000002e);
break;
- case CHIP_TOPAZ:
case CHIP_CARRIZO:
amdgpu_ring_write(ring, 0x00000002);
amdgpu_ring_write(ring, 0x00000000);
break;
+ case CHIP_TOPAZ:
+ amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
+ 0x00000000 : 0x00000002);
+ amdgpu_ring_write(ring, 0x00000000);
+ break;
case CHIP_STONEY:
amdgpu_ring_write(ring, 0x00000000);
amdgpu_ring_write(ring, 0x00000000);
static int iceland_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
}
}
+static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/**
* sdma_v2_4_init_microcode - load ucode images from disk
*
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+ sdma_v2_4_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
return -EINVAL;
}
- /* unhalt the MEs */
- sdma_v2_4_enable(adev, true);
+ /* halt the engine before programming */
+ sdma_v2_4_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v2_4_gfx_resume(adev);
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ sdma_v2_4_free_microcode(adev);
return 0;
}
}
}
+static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/**
* sdma_v3_0_init_microcode - load ucode images from disk
*
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+
+ /* unhalt the MEs */
+ sdma_v3_0_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v3_0_ctx_switch_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
}
}
- /* unhalt the MEs */
- sdma_v3_0_enable(adev, true);
- /* enable sdma ring preemption */
- sdma_v3_0_ctx_switch_enable(adev, true);
+ /* disable the SDMA engine before programming it */
+ sdma_v3_0_ctx_switch_enable(adev, false);
+ sdma_v3_0_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v3_0_gfx_resume(adev);
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ sdma_v3_0_free_microcode(adev);
return 0;
}
static int tonga_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
return true;
}
+static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+{
+ u32 caps = 0;
+ u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+
+ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
+ caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
+
+ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
+ caps |= AMDGPU_VIRT_CAPS_IS_VF;
+
+ return caps;
+}
+
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
{mmGB_MACROTILE_MODE7, true},
};
.get_xclk = &vi_get_xclk,
.set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks,
+ .get_virtual_caps = &vi_get_virtual_caps,
/* these should be moved to their own ip modules */
.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
pqm_uninit(&p->pqm);
/* Iterate over all process device data structure and check
- * if we should reset all wavefronts */
- list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ * if we should delete debug managers and reset all wavefronts
+ */
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ if ((pdd->dev->dbgmgr) &&
+ (pdd->dev->dbgmgr->pasid == p->pasid))
+ kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
+
if (pdd->reset_wavefronts) {
pr_warn("amdkfd: Resetting all wave fronts\n");
dbgdev_wave_reset_wavefronts(pdd->dev, p);
pdd->reset_wavefronts = false;
}
+ }
mutex_unlock(&p->mutex);
idx = srcu_read_lock(&kfd_processes_srcu);
+ /*
+ * Look for the process that matches the pasid. If there is no such
+ * process, we either released it in amdkfd's own notifier, or there
+ * is a bug. Unfortunately, there is no way to tell...
+ */
hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
- if (p->pasid == pasid)
- break;
+ if (p->pasid == pasid) {
- srcu_read_unlock(&kfd_processes_srcu, idx);
+ srcu_read_unlock(&kfd_processes_srcu, idx);
- BUG_ON(p->pasid != pasid);
+ pr_debug("Unbinding process %d from IOMMU\n", pasid);
- mutex_lock(&p->mutex);
+ mutex_lock(&p->mutex);
- if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
- kfd_dbgmgr_destroy(dev->dbgmgr);
+ if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
+ kfd_dbgmgr_destroy(dev->dbgmgr);
- pqm_uninit(&p->pqm);
+ pqm_uninit(&p->pqm);
- pdd = kfd_get_process_device_data(dev, p);
+ pdd = kfd_get_process_device_data(dev, p);
- if (!pdd) {
- mutex_unlock(&p->mutex);
- return;
- }
+ if (!pdd) {
+ mutex_unlock(&p->mutex);
+ return;
+ }
- if (pdd->reset_wavefronts) {
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
- pdd->reset_wavefronts = false;
- }
+ if (pdd->reset_wavefronts) {
+ dbgdev_wave_reset_wavefronts(pdd->dev, p);
+ pdd->reset_wavefronts = false;
+ }
- /*
- * Just mark pdd as unbound, because we still need it to call
- * amd_iommu_unbind_pasid() in when the process exits.
- * We don't call amd_iommu_unbind_pasid() here
- * because the IOMMU called us.
- */
- pdd->bound = false;
+ /*
+ * Just mark pdd as unbound, because we still need it
+ * to call amd_iommu_unbind_pasid() when the process
+ * exits.
+ * We don't call amd_iommu_unbind_pasid() here
+ * because the IOMMU called us.
+ */
+ pdd->bound = false;
- mutex_unlock(&p->mutex);
+ mutex_unlock(&p->mutex);
+
+ return;
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
}
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
dev->node_props.simd_count);
if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
- pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
+ pr_info_once("kfd: mem_banks_count truncated from %d to %d\n",
dev->node_props.mem_banks_count,
dev->mem_bank_count);
sysfs_show_32bit_prop(buffer, "mem_banks_count",
int (*hw_init)(void *handle);
/* tears down the hw state */
int (*hw_fini)(void *handle);
+ void (*late_fini)(void *handle);
/* handles IP specific hw/sw changes for suspend */
int (*suspend)(void *handle);
/* handles IP specific hw/sw changes for resume */
ULONG ulReserved[12];
}ATOM_ASIC_PROFILING_INFO_V3_5;
+/* for Polaris10/11 AVFS parameters */
+typedef struct _ATOM_ASIC_PROFILING_INFO_V3_6
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ ULONG ulMaxVddc;
+ ULONG ulMinVddc;
+ USHORT usLkgEuseIndex;
+ UCHAR ucLkgEfuseBitLSB;
+ UCHAR ucLkgEfuseLength;
+ ULONG ulLkgEncodeLn_MaxDivMin;
+ ULONG ulLkgEncodeMax;
+ ULONG ulLkgEncodeMin;
+ EFUSE_LINEAR_FUNC_PARAM sRoFuse;
+ ULONG ulEvvDefaultVddc;
+ ULONG ulEvvNoCalcVddc;
+ ULONG ulSpeed_Model;
+ ULONG ulSM_A0;
+ ULONG ulSM_A1;
+ ULONG ulSM_A2;
+ ULONG ulSM_A3;
+ ULONG ulSM_A4;
+ ULONG ulSM_A5;
+ ULONG ulSM_A6;
+ ULONG ulSM_A7;
+ UCHAR ucSM_A0_sign;
+ UCHAR ucSM_A1_sign;
+ UCHAR ucSM_A2_sign;
+ UCHAR ucSM_A3_sign;
+ UCHAR ucSM_A4_sign;
+ UCHAR ucSM_A5_sign;
+ UCHAR ucSM_A6_sign;
+ UCHAR ucSM_A7_sign;
+ ULONG ulMargin_RO_a;
+ ULONG ulMargin_RO_b;
+ ULONG ulMargin_RO_c;
+ ULONG ulMargin_fixed;
+ ULONG ulMargin_Fmax_mean;
+ ULONG ulMargin_plat_mean;
+ ULONG ulMargin_Fmax_sigma;
+ ULONG ulMargin_plat_sigma;
+ ULONG ulMargin_DC_sigma;
+ ULONG ulLoadLineSlop;
+ ULONG ulaTDClimitPerDPM[8];
+ ULONG ulaNoCalcVddcPerDPM[8];
+ ULONG ulAVFS_meanNsigma_Acontant0;
+ ULONG ulAVFS_meanNsigma_Acontant1;
+ ULONG ulAVFS_meanNsigma_Acontant2;
+ USHORT usAVFS_meanNsigma_DC_tol_sigma;
+ USHORT usAVFS_meanNsigma_Platform_mean;
+ USHORT usAVFS_meanNsigma_Platform_sigma;
+ ULONG ulGB_VDROOP_TABLE_CKSOFF_a0;
+ ULONG ulGB_VDROOP_TABLE_CKSOFF_a1;
+ ULONG ulGB_VDROOP_TABLE_CKSOFF_a2;
+ ULONG ulGB_VDROOP_TABLE_CKSON_a0;
+ ULONG ulGB_VDROOP_TABLE_CKSON_a1;
+ ULONG ulGB_VDROOP_TABLE_CKSON_a2;
+ ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+ USHORT usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+ ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+ ULONG ulAVFSGB_FUSE_TABLE_CKSON_m1;
+ USHORT usAVFSGB_FUSE_TABLE_CKSON_m2;
+ ULONG ulAVFSGB_FUSE_TABLE_CKSON_b;
+ USHORT usMaxVoltage_0_25mv;
+ UCHAR ucEnableGB_VDROOP_TABLE_CKSOFF;
+ UCHAR ucEnableGB_VDROOP_TABLE_CKSON;
+ UCHAR ucEnableGB_FUSE_TABLE_CKSOFF;
+ UCHAR ucEnableGB_FUSE_TABLE_CKSON;
+ USHORT usPSM_Age_ComFactor;
+ UCHAR ucEnableApplyAVFS_CKS_OFF_Voltage;
+ UCHAR ucReserved;
+}ATOM_ASIC_PROFILING_INFO_V3_6;
+
typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{
ULONG ulMaxSclkFreq;
enum cgs_ucode_id type,
struct cgs_firmware_info *info);
+typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
+ enum cgs_ucode_id type);
+
typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
cgs_set_camera_voltages_t set_camera_voltages;
/* Firmware Info */
cgs_get_firmware_info get_firmware_info;
+ cgs_rel_firmware rel_firmware;
/* cg pg interface*/
cgs_set_powergating_state set_powergating_state;
cgs_set_clockgating_state set_clockgating_state;
CGS_CALL(set_camera_voltages,dev,mask,voltages)
#define cgs_get_firmware_info(dev, type, info) \
CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_rel_firmware(dev, type) \
+ CGS_CALL(rel_firmware, dev, type)
#define cgs_set_powergating_state(dev, block_type, state) \
CGS_CALL(set_powergating_state, dev, block_type, state)
#define cgs_set_clockgating_state(dev, block_type, state) \
ret = hwmgr->hwmgr_func->backend_init(hwmgr);
if (ret)
- goto err;
+ goto err1;
pr_info("amdgpu: powerplay initialized\n");
return 0;
+err1:
+ if (hwmgr->pptable_func->pptable_fini)
+ hwmgr->pptable_func->pptable_fini(hwmgr);
err:
pr_err("amdgpu: powerplay initialization failed\n");
return ret;
if (hwmgr->hwmgr_func->backend_fini != NULL)
ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
+ if (hwmgr->pptable_func->pptable_fini)
+ hwmgr->pptable_func->pptable_fini(hwmgr);
+
return ret;
}
pem_unregister_interrupts(eventmgr);
pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-
- if (eventmgr != NULL)
- kfree(eventmgr);
}
int eventmgr_init(struct pp_instance *handle)
data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
+ data->force_pcie_gen = PP_PCIEGenInvalid;
+
if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
PP_ASSERT_WITH_CODE(false,
"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i].value);
+ return vddci_table->entries[i-1].value);
}
static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
{
PHM_FUNC_CHECK(hwmgr);
- if (hwmgr->hwmgr_func->store_cc6_data == NULL)
+ if (display_config == NULL)
return -EINVAL;
hwmgr->display_config = *display_config;
+
+ if (hwmgr->hwmgr_func->store_cc6_data == NULL)
+ return -EINVAL;
+
/* TODO: pass other display configuration in future */
if (hwmgr->hwmgr_func->store_cc6_data)
if (hwmgr == NULL || hwmgr->ps == NULL)
return -EINVAL;
+ /* do hwmgr finish */
+ kfree(hwmgr->backend);
+
+ kfree(hwmgr->start_thermal_controller.function_list);
+
+ kfree(hwmgr->set_temperature_range.function_list);
+
kfree(hwmgr->ps);
kfree(hwmgr);
return 0;
PP_ASSERT_WITH_CODE(false,
"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i].value);
+ return vddci_table->entries[i-1].value);
}
int phm_find_boot_level(void *table,
uint8_t phases;
uint8_t cks_enable;
uint8_t cks_voffset;
+ uint32_t sclk_offset;
};
typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record;
table->Smio[level] |=
data->mvdd_voltage_table.entries[level].smio_low;
}
- table->SmioMask2 = data->vddci_voltage_table.mask_low;
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
}
vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
(dep_table->entries[i].vddc -
(uint16_t)data->vddc_vddci_delta));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
}
if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
}
mem_level->MclkFrequency = clock;
- mem_level->StutterEnable = 0;
mem_level->EnabledForThrottle = 1;
mem_level->EnabledForActivity = 0;
mem_level->UpHyst = 0;
mem_level->VoltageDownHyst = 0;
mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
mem_level->StutterEnable = false;
-
mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
data->display_timing.num_existing_displays = info.display_count;
* a higher state by default such that we are not affected by
* the up threshold or MCLK DPM latency.
*/
- levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+ levels[0].ActivityLevel = 0x1f;
CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
data->smc_state_table.MemoryDpmLevelCount =
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
- if (!data->sclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- table->ACPILevel.SclkFrequency,
- &table->ACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value "
- "in Clock Dependency Table", );
- } else {
- sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
- table->ACPILevel.MinVoltage =
- data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
- }
+
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ sclk_frequency,
+ &table->ACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDC voltage value "
+ "in Clock Dependency Table",
+ );
+
result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
- if (!data->mclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency =
- data->dpm_table.mclk_table.dpm_levels[0].value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- &table->MemoryACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value "
- "in Clock Dependency Table",
- );
- } else {
- table->MemoryACPILevel.MclkFrequency =
- data->vbios_boot_state.mclk_bootup_value;
- table->MemoryACPILevel.MinVoltage =
- data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
- }
+
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency =
+ data->dpm_table.mclk_table.dpm_levels[0].value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ &table->MemoryACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value "
+ "in Clock Dependency Table",
+ );
us_mvdd = 0;
if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
table_info->mm_dep_table;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
table->VceLevelCount = (uint8_t)(mm_table->count);
table->VceBootLevel = 0;
table->VceLevel[count].MinVoltage = 0;
table->VceLevel[count].MinVoltage |=
(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+
table->VceLevel[count].MinVoltage |=
- ((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
+ (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
/*retrieve divider value for VBIOS */
struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
table_info->mm_dep_table;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
table->SamuBootLevel = 0;
table->SamuLevelCount = (uint8_t)(mm_table->count);
table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
VOLTAGE_SCALE) << VDDC_SHIFT;
- table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
/* retrieve divider value for VBIOS */
struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
table_info->mm_dep_table;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
table->UvdLevelCount = (uint8_t)(mm_table->count);
table->UvdBootLevel = 0;
table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
VOLTAGE_SCALE) << VDDC_SHIFT;
- table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
/* retrieve divider value for VBIOS */
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
}
+
return result;
}
static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
- uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
- volt_with_cks, value;
- uint16_t clock_freq_u16;
+ uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
- volt_offset = 0;
+ uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
* if the part is SS or FF. if RO >= 1660MHz, part is FF.
*/
efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (146 * 4));
- efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (148 * 4));
+ ixSMU_EFUSE_0 + (67 * 4));
efuse &= 0xFF000000;
efuse = efuse >> 24;
- efuse2 &= 0xF;
-
- if (efuse2 == 1)
- ro = (2300 - 1350) * efuse / 255 + 1350;
- else
- ro = (2500 - 1000) * efuse / 255 + 1000;
- if (ro >= 1660)
- type = 0;
- else
- type = 1;
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ min = 1000;
+ max = 2300;
+ } else {
+ min = 1100;
+ max = 2100;
+ }
- /* Populate Stretch amount */
- data->smc_state_table.ClockStretcherAmount = stretch_amount;
+ ro = efuse * (max - min) / 255 + min;
/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
for (i = 0; i < sclk_table->count; i++) {
data->smc_state_table.Sclk_CKS_masterEn0_7 |=
sclk_table->entries[i].cks_enable << i;
- volt_without_cks = (uint32_t)((14041 *
- (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
- (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
- volt_with_cks = (uint32_t)((13946 *
- (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
- (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
+ (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+ volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+ (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+ } else {
+ volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
+ (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+ volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+ (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
+ }
+
if (volt_without_cks >= volt_with_cks)
volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+ sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
}
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- STRETCH_ENABLE, 0x0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x1);
- /* PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, staticEnable, 0x1); */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x0);
-
+ data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
/* Populate CKS Lookup Table */
if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
stretch_amount2 = 0;
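The volt_offset change above replaces truncate-then-add-one with a true ceiling division; two illustrative deltas (values invented) show where they differ:

/*
 *   delta = 25: old (2500 / 625) + 1 = 5,  new (2500 + 624) / 625 = 4
 *   delta = 26: old (2600 / 625) + 1 = 5,  new (2600 + 624) / 625 = 5
 *
 * The old form over-counted by one exactly when delta * 100 landed on
 * a multiple of 625.
 */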
return -EINVAL);
}
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFC2FF87;
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
- polaris10_clock_stretcher_lookup_table[stretch_amount2][0];
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
- polaris10_clock_stretcher_lookup_table[stretch_amount2][1];
- clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
- GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].SclkSetting.SclkFrequency) / 100);
- if (polaris10_clock_stretcher_lookup_table[stretch_amount2][0] < clock_freq_u16
- && polaris10_clock_stretcher_lookup_table[stretch_amount2][1] > clock_freq_u16) {
- /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
- value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
- /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
- value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
- /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
- value |= (polaris10_clock_stretch_amount_conversion
- [polaris10_clock_stretcher_lookup_table[stretch_amount2][3]]
- [stretch_amount]) << 3;
- }
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq);
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq);
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
- polaris10_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
- (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- /* Populate DDT Lookup Table */
- for (i = 0; i < 4; i++) {
- /* Assign the minimum and maximum VID stored
- * in the last row of Clock Stretcher Voltage Table.
- */
- data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].minVID =
- (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][2];
- data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].maxVID =
- (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][3];
- /* Loop through each SCLK and check the frequency
- * to see if it lies within the frequency for clock stretcher.
- */
- for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
- cks_setting = 0;
- clock_freq = PP_SMC_TO_HOST_UL(
- data->smc_state_table.GraphicsLevel[j].SclkSetting.SclkFrequency);
- /* Check the allowed frequency against the sclk level[j].
- * Sclk's endianness has already been converted,
- * and it's in 10Khz unit,
- * as opposed to Data table, which is in Mhz unit.
- */
- if (clock_freq >= (polaris10_clock_stretcher_ddt_table[type][i][0]) * 100) {
- cks_setting |= 0x2;
- if (clock_freq < (polaris10_clock_stretcher_ddt_table[type][i][1]) * 100)
- cks_setting |= 0x1;
- }
- data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting
- |= cks_setting << (j * 2);
- }
- CONVERT_FROM_HOST_TO_SMC_US(
- data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting);
- }
-
value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
value &= 0xFFFFFFFE;
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
return 0;
}
+
+int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
+ int result = 0;
+ struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+ AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+ AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+ uint32_t tmp, i;
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return result;
+
+ result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+ if (0 == result) {
+ table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+ table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+ table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+ table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+ table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+ table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+ table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+ table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+ table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+ table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
+ table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+ table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+ table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+ table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
+ table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+ AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+ AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+ AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+ AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+ AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+ AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+ AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+ for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+ AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+ AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+ }
+
+ result = polaris10_read_smc_sram_dword(smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+ &tmp, data->sram_end);
+
+ polaris10_copy_bytes_to_smc(smumgr,
+ tmp,
+ (uint8_t *)&AVFS_meanNsigma,
+ sizeof(AVFS_meanNsigma_t),
+ data->sram_end);
+
+ result = polaris10_read_smc_sram_dword(smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+ &tmp, data->sram_end);
+ polaris10_copy_bytes_to_smc(smumgr,
+ tmp,
+ (uint8_t *)&AVFS_SclkOffset,
+ sizeof(AVFS_Sclk_Offset_t),
+ data->sram_end);
+
+ data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+ data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+ }
+ return result;
+}
+
+
/**
* Initializes the SMC table and uploads it
*
"Failed to populate Clock Stretcher Data Table!",
return result);
}
+
+ result = polaris10_populate_avfs_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
+
table->CurrSclkPllRange = 0xff;
table->GraphicsVoltageChangeEnable = 1;
table->GraphicsThermThrottleEnable = 1;
static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t soft_register_value = 0;
+ uint32_t handshake_disables_offset = data->soft_regs_start
+ + offsetof(SMU74_SoftRegisters, HandshakeDisables);
/* enable SCLK dpm */
if (!data->sclk_dpm_key_disabled)
/* enable MCLK dpm */
if (0 == data->mclk_dpm_key_disabled) {
+ /* Disable UVD - SMU handshake for MCLK. */
+ soft_register_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, handshake_disables_offset);
+ soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ handshake_disables_offset, soft_register_value);
PP_ASSERT_WITH_CODE(
(0 == smum_send_msg_to_smc(hwmgr->smumgr,
"Failed to enable MCLK DPM during DPM Start Function!",
return -1);
-
PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
+ smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+
tmp_result = polaris10_enable_sclk_control(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable SCLK control!", result = tmp_result);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
if (hwmgr->chip_id == CHIP_POLARIS11)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SPLLShutdownSupport);
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
uint16_t vv_id;
- uint16_t vddc = 0;
+ uint32_t vddc = 0;
uint16_t i, j;
uint32_t sclk = 0;
struct phm_ppt_v1_information *table_info =
continue);
- /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
- PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
+ /* need to make sure vddc is less than 2 V, or else it could burn
+ * the ASIC; here vddc is a real voltage level in units of 0.01 mV */
+ PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
"Invalid VDDC value", result = -EINVAL;);
/* the voltage should not be zero nor equal to leakage ID */
return 0;
}
+int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+ table_info->vdd_dep_on_mclk;
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+ uint32_t i;
+
+ if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
+ if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
+ return 0;
+
+ for (i = 0; i < lookup_table->count; i++) {
+ if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
+ dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
+
+
int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
+ data->enable_tdc_limit_feature = true;
+ data->enable_pkg_pwr_tracking_feature = true;
+ data->force_pcie_gen = PP_PCIEGenInvalid;
+ data->mclk_stutter_mode_threshold = 40000;
+
if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
}
+ if (table_info->cac_dtp_table->usClockStretchAmount != 0)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+
polaris10_set_features_platform_caps(hwmgr);
+ polaris10_patch_voltage_workaround(hwmgr);
polaris10_init_dpm_defaults(hwmgr);
/* Get leakage voltage based on leakage ID. */
ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
- ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
- (ATOM_Tonga_SCLK_Dependency_Table *)
+ PPTable_Generic_SubTable_Header *sclk_dep_table =
+ (PPTable_Generic_SubTable_Header *)
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+
ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
(ATOM_Tonga_MCLK_Dependency_Table *)
(((unsigned long)powerplay_table) +
/* Performance levels are arranged from low to high. */
performance_level->memory_clock = mclk_dep_table->entries
[state_entry->ucMemoryClockIndexLow].ulMclk;
- performance_level->engine_clock = sclk_dep_table->entries
+ if (sclk_dep_table->ucRevId == 0)
+ performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexLow].ulSclk;
+ else if (sclk_dep_table->ucRevId == 1)
+ performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
[state_entry->ucEngineClockIndexLow].ulSclk;
performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
state_entry->ucPCIEGenLow);
[polaris10_power_state->performance_level_count++]);
performance_level->memory_clock = mclk_dep_table->entries
[state_entry->ucMemoryClockIndexHigh].ulMclk;
- performance_level->engine_clock = sclk_dep_table->entries
+
+ if (sclk_dep_table->ucRevId == 0)
+ performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexHigh].ulSclk;
+ else if (sclk_dep_table->ucRevId == 1)
+ performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
[state_entry->ucEngineClockIndexHigh].ulSclk;
+
performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
state_entry->ucPCIEGenHigh);
performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
switch (state->classification.ui_label) {
case PP_StateUILabel_Performance:
data->use_pcie_performance_levels = true;
-
for (i = 0; i < ps->performance_level_count; i++) {
if (data->pcie_gen_performance.max <
ps->performance_levels[i].pcie_gen)
ps->performance_levels[i].pcie_lane)
data->pcie_lane_performance.max =
ps->performance_levels[i].pcie_lane;
-
if (data->pcie_lane_performance.min >
ps->performance_levels[i].pcie_lane)
data->pcie_lane_performance.min =
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
if (!bgate) {
- data->smc_state_table.SamuBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
+ data->smc_state_table.SamuBootLevel = 0;
mm_boot_level_offset = data->dpm_table_start +
offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
mm_boot_level_offset /= 4;
return 0;
}
+static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+ return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+}
+
static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
int tmp_result, result = 0;
"Failed to program memory timing parameters!",
result = tmp_result);
+ tmp_result = polaris10_notify_smc_display(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to notify smc display settings!",
+ result = tmp_result);
+
tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to unfreeze SCLK MCLK DPM!",
PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}
+
int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
polaris10_notify_smc_display_change(hwmgr, false);
- else
- polaris10_notify_smc_display_change(hwmgr, true);
return 0;
}
frame_time_in_us = 1000000 / refresh_rate;
pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+ data->frame_time_x2 = frame_time_in_us * 2 / 100;
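+ /* frame_time_x2 appears to be twice the frame time expressed in 100 us
+ * units; it is later handed to the SMC as the VBI timeout (see
+ * polaris10_notify_smc_display()). */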
+
display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
- polaris10_notify_smc_display_change(hwmgr, num_active_displays != 0);
-
return 0;
}
return 0;
}
- data->need_long_memory_training = true;
+ data->need_long_memory_training = false;
/*
* PPMCME_FirmwareDescriptorEntry *pfd = NULL;
/* soft pptable for re-uploading into smu */
void *soft_pp_table;
+
+ uint32_t avfs_vdroop_override_setting;
+ bool apply_avfs_cks_off_voltage;
+ uint32_t frame_time_x2;
};
/* To convert to Q8.8 format for firmware */
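+ /* Note: the copy below deliberately skips the last 92 bytes of the
+ * PmFuses table; presumably that tail is owned by the SMC and must not
+ * be overwritten (an assumption, the patch gives no rationale). */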
if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
(uint8_t *)&data->power_tune_table,
- sizeof(struct SMU74_Discrete_PmFuses), data->sram_end))
+ (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to download PmFuseTable Failed!",
return -EINVAL);
int ret;
struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
return 0;
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+
ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
0 : -1;
return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false;
}
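+/* Notify the platform, through the ATCS "PCIe device ready" function, that
+ * the GPU can accept PCIe performance requests. (Summary inferred from the
+ * function and method names; the ACPI details are not part of this patch.) */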
+bool acpi_atcs_notify_pcie_device_ready(void *device)
+{
+ int32_t temp_buffer = 1;
+
+ return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
+ ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION,
+ &temp_buffer,
+ NULL,
+ 0,
+ sizeof(temp_buffer),
+ 0);
+}
+
int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
{
struct atcs_pref_req_input atcs_input;
int result;
struct cgs_system_info info = {0};
- if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST))
+ if (acpi_atcs_notify_pcie_device_ready(device) != 0)
return -EINVAL;
info.size = sizeof(struct cgs_system_info);
ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST,
&atcs_input,
&atcs_output,
- 0,
+ 1,
sizeof(atcs_input),
sizeof(atcs_output));
if (result != 0)
}
int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
- uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage)
+ uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
{
int result;
if (0 != result)
return result;
- *voltage = get_voltage_info_param_space.usVoltageLevel;
+ *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel;
return result;
}
return 0;
}
+
+int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param)
+{
+ ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
+
+ if (param == NULL)
+ return -EINVAL;
+
+ profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
+ cgs_atom_get_data_table(hwmgr->device,
+ GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
+ NULL, NULL, NULL);
+ if (!profile)
+ return -1;
+
+ param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0;
+ param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1;
+ param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2;
+ param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma;
+ param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean;
+ param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma;
+ param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0;
+ param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1;
+ param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2;
+ param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0;
+ param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1;
+ param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2;
+ param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+ param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+ param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+ param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1;
+ param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2;
+ param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b;
+ param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv;
+ param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
+ param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
+ param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
+ param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
+ param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor;
+ param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
+
+ return 0;
+}
};
typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment;
+struct pp_atom_ctrl__avfs_parameters {
+ uint32_t ulAVFS_meanNsigma_Acontant0;
+ uint32_t ulAVFS_meanNsigma_Acontant1;
+ uint32_t ulAVFS_meanNsigma_Acontant2;
+ uint16_t usAVFS_meanNsigma_DC_tol_sigma;
+ uint16_t usAVFS_meanNsigma_Platform_mean;
+ uint16_t usAVFS_meanNsigma_Platform_sigma;
+ uint32_t ulGB_VDROOP_TABLE_CKSOFF_a0;
+ uint32_t ulGB_VDROOP_TABLE_CKSOFF_a1;
+ uint32_t ulGB_VDROOP_TABLE_CKSOFF_a2;
+ uint32_t ulGB_VDROOP_TABLE_CKSON_a0;
+ uint32_t ulGB_VDROOP_TABLE_CKSON_a1;
+ uint32_t ulGB_VDROOP_TABLE_CKSON_a2;
+ uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+ uint16_t usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+ uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+ uint32_t ulAVFSGB_FUSE_TABLE_CKSON_m1;
+ uint16_t usAVFSGB_FUSE_TABLE_CKSON_m2;
+ uint32_t ulAVFSGB_FUSE_TABLE_CKSON_b;
+ uint16_t usMaxVoltage_0_25mv;
+ uint8_t ucEnableGB_VDROOP_TABLE_CKSOFF;
+ uint8_t ucEnableGB_VDROOP_TABLE_CKSON;
+ uint8_t ucEnableGB_FUSE_TABLE_CKSOFF;
+ uint8_t ucEnableGB_FUSE_TABLE_CKSON;
+ uint16_t usPSM_Age_ComFactor;
+ uint8_t ucEnableApplyAVFS_CKS_OFF_Voltage;
+ uint8_t ucReserved;
+};
+
extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
uint8_t level);
extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
- uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+ uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage);
extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
+
+extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
+
#endif
table->Smio[count] |=
data->mvdd_voltage_table.entries[count].smio_low;
}
- table->SmioMask2 = data->vddci_voltage_table.mask_low;
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
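+ /* Fix: SmioMask2 pairs with the MVDD voltage table populated above;
+ * the VDDCI mask had been copied here by mistake. */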
CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
}
}
}
- /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
- for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
- data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc;
- /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */
- /* param1 is for corresponding std voltage */
- data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
- }
- data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
-
- if (NULL != allowed_vdd_mclk_table) {
- /* Initialize Vddci DPM table based on allow Mclk values */
- for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
- data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci;
- data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
- data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd;
- data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
- }
- data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
- data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
- }
-
/* setup PCIE gen speed levels*/
tonga_setup_default_pcie_tables(hwmgr);
data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
+ data->force_pcie_gen = PP_PCIEGenInvalid;
if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
} ATOM_Tonga_SCLK_Dependency_Table;
+typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+ UCHAR ucVddInd; /* Base voltage */
+ USHORT usVddcOffset; /* Offset relative to base voltage */
+ ULONG ulSclk;
+ USHORT usEdcCurrent;
+ UCHAR ucReliabilityTemperature;
+ UCHAR ucCKSVOffsetandDisable; /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
+ ULONG ulSclkOffset;
+} ATOM_Polaris_SCLK_Dependency_Record;
+
+typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+ ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+} ATOM_Polaris_SCLK_Dependency_Table;
+
typedef struct _ATOM_Tonga_PCIE_Record {
UCHAR ucPCIEGenSpeed;
UCHAR usPCIELaneWidth;
(((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
if (0 != powerplay_table->usPPMTableOffset) {
- if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
+ if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnablePlatformPowerManagement);
}
static int get_sclk_voltage_dependency_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
- const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table
+ const PPTable_Generic_SubTable_Header *sclk_dep_table
)
{
uint32_t table_size, i;
phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
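+ /* ucRevId selects the VBIOS record layout: revision 0 is the Tonga
+ * format, revision 1 the Polaris format whose records additionally
+ * carry an SCLK offset. */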
- PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries),
- "Invalid PowerPlay Table!", return -1);
+ if (sclk_dep_table->ucRevId < 1) {
+ const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
+ (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
- table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
- * sclk_dep_table->ucNumEntries;
+ PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
+ "Invalid PowerPlay Table!", return -1);
- sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+ * tonga_table->ucNumEntries;
- if (NULL == sclk_table)
- return -ENOMEM;
+ sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+ kzalloc(table_size, GFP_KERNEL);
- memset(sclk_table, 0x00, table_size);
-
- sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries;
-
- for (i = 0; i < sclk_dep_table->ucNumEntries; i++) {
- sclk_table->entries[i].vddInd =
- sclk_dep_table->entries[i].ucVddInd;
- sclk_table->entries[i].vdd_offset =
- sclk_dep_table->entries[i].usVddcOffset;
- sclk_table->entries[i].clk =
- sclk_dep_table->entries[i].ulSclk;
- sclk_table->entries[i].cks_enable =
- (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
- sclk_table->entries[i].cks_voffset =
- (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
- }
+ if (NULL == sclk_table)
+ return -ENOMEM;
+
+ sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
+
+ for (i = 0; i < tonga_table->ucNumEntries; i++) {
+ sclk_table->entries[i].vddInd =
+ tonga_table->entries[i].ucVddInd;
+ sclk_table->entries[i].vdd_offset =
+ tonga_table->entries[i].usVddcOffset;
+ sclk_table->entries[i].clk =
+ tonga_table->entries[i].ulSclk;
+ sclk_table->entries[i].cks_enable =
+ (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+ sclk_table->entries[i].cks_voffset =
+ (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+ }
+ } else {
+ const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
+ (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
+
+ PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
+ "Invalid PowerPlay Table!", return -1);
+
+ table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+ * polaris_table->ucNumEntries;
+
+ sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+ kzalloc(table_size, GFP_KERNEL);
+ if (NULL == sclk_table)
+ return -ENOMEM;
+
+ sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
+
+ for (i = 0; i < polaris_table->ucNumEntries; i++) {
+ sclk_table->entries[i].vddInd =
+ polaris_table->entries[i].ucVddInd;
+ sclk_table->entries[i].vdd_offset =
+ polaris_table->entries[i].usVddcOffset;
+ sclk_table->entries[i].clk =
+ polaris_table->entries[i].ulSclk;
+ sclk_table->entries[i].cks_enable =
+ (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+ sclk_table->entries[i].cks_voffset =
+ (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+ sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
+ }
+ }
*pp_tonga_sclk_dep_table = sclk_table;
return 0;
const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
(const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
- const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
- (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
+ const PPTable_Generic_SubTable_Header *sclk_dep_table =
+ (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
(const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- if (NULL != hwmgr->soft_pp_table) {
- kfree(hwmgr->soft_pp_table);
+ if (NULL != hwmgr->soft_pp_table)
hwmgr->soft_pp_table = NULL;
- }
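+ /* soft_pp_table is only cleared, not freed: judging by the removed
+ * kfree(), it aliases memory owned elsewhere, so freeing it here would
+ * be a double free. */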
- if (NULL != pp_table_information->vdd_dep_on_sclk)
- pp_table_information->vdd_dep_on_sclk = NULL;
+ kfree(pp_table_information->vdd_dep_on_sclk);
+ pp_table_information->vdd_dep_on_sclk = NULL;
- if (NULL != pp_table_information->vdd_dep_on_mclk)
- pp_table_information->vdd_dep_on_mclk = NULL;
+ kfree(pp_table_information->vdd_dep_on_mclk);
+ pp_table_information->vdd_dep_on_mclk = NULL;
- if (NULL != pp_table_information->valid_mclk_values)
- pp_table_information->valid_mclk_values = NULL;
+ kfree(pp_table_information->valid_mclk_values);
+ pp_table_information->valid_mclk_values = NULL;
- if (NULL != pp_table_information->valid_sclk_values)
- pp_table_information->valid_sclk_values = NULL;
+ kfree(pp_table_information->valid_sclk_values);
+ pp_table_information->valid_sclk_values = NULL;
- if (NULL != pp_table_information->vddc_lookup_table)
- pp_table_information->vddc_lookup_table = NULL;
+ kfree(pp_table_information->vddc_lookup_table);
+ pp_table_information->vddc_lookup_table = NULL;
- if (NULL != pp_table_information->vddgfx_lookup_table)
- pp_table_information->vddgfx_lookup_table = NULL;
+ kfree(pp_table_information->vddgfx_lookup_table);
+ pp_table_information->vddgfx_lookup_table = NULL;
- if (NULL != pp_table_information->mm_dep_table)
- pp_table_information->mm_dep_table = NULL;
+ kfree(pp_table_information->mm_dep_table);
+ pp_table_information->mm_dep_table = NULL;
- if (NULL != pp_table_information->cac_dtp_table)
- pp_table_information->cac_dtp_table = NULL;
+ kfree(pp_table_information->cac_dtp_table);
+ pp_table_information->cac_dtp_table = NULL;
- if (NULL != hwmgr->dyn_state.cac_dtp_table)
- hwmgr->dyn_state.cac_dtp_table = NULL;
+ kfree(hwmgr->dyn_state.cac_dtp_table);
+ hwmgr->dyn_state.cac_dtp_table = NULL;
- if (NULL != pp_table_information->ppm_parameter_table)
- pp_table_information->ppm_parameter_table = NULL;
+ kfree(pp_table_information->ppm_parameter_table);
+ pp_table_information->ppm_parameter_table = NULL;
- if (NULL != pp_table_information->pcie_table)
- pp_table_information->pcie_table = NULL;
+ kfree(pp_table_information->pcie_table);
+ pp_table_information->pcie_table = NULL;
- if (NULL != hwmgr->pptable) {
- kfree(hwmgr->pptable);
- hwmgr->pptable = NULL;
- }
+ kfree(hwmgr->pptable);
+ hwmgr->pptable = NULL;
return result;
}
uint8_t ucVr_I2C_Line;
uint8_t ucPlx_I2C_address;
uint8_t ucPlx_I2C_Line;
+ uint32_t usBoostPowerLimit;
+ uint8_t ucCKS_LDO_REFSEL;
};
struct phm_ppm_table {
#pragma pack(push, 1)
+#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305)
#define PPSMC_SWSTATE_FLAG_DC 0x01
#define PPSMC_SWSTATE_FLAG_UVD 0x02
#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300)
#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301)
+#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
+
#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600)
#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601)
#define PPSMC_MSG_SetAddress ((uint16_t) 0x800)
extern int acpi_pcie_perf_request(void *device,
uint8_t perf_req,
bool advertise);
+extern bool acpi_atcs_notify_pcie_device_ready(void *device);
#define SMU__NUM_LCLK_DPM_LEVELS 8
#define SMU__NUM_PCIE_DPM_LEVELS 8
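+/* Presumably fixed-point (m1, m2, b) coefficient triples for the AVFS
+ * curve fits programmed into the SMU; their scaling and pairing are an
+ * assumption, as the patch does not document these constants. */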
+#define EXP_M1 35
+#define EXP_M2 92821
+#define EXP_B 66629747
+
+#define EXP_M1_1 365
+#define EXP_M2_1 658700
+#define EXP_B_1 305506134
+
+#define EXP_M1_2 189
+#define EXP_M2_2 379692
+#define EXP_B_2 194609469
+
+#define EXP_M1_3 99
+#define EXP_M2_3 217915
+#define EXP_B_3 122255994
+
+#define EXP_M1_4 51
+#define EXP_M2_4 122643
+#define EXP_B_4 74893384
+
+#define EXP_M1_5 423
+#define EXP_M2_5 1103326
+#define EXP_B_5 728122621
+
enum SID_OPTION {
SID_OPTION_HI,
SID_OPTION_LO,
uint32_t CacConfigTable;
uint32_t CacStatusTable;
-
uint32_t mcRegisterTable;
-
uint32_t mcArbDramTimingTable;
-
-
-
uint32_t PmFuseTable;
uint32_t Globals;
uint32_t ClockStretcherTable;
uint32_t VftTable;
- uint32_t Reserved[21];
+ uint32_t Reserved1;
+ uint32_t AvfsTable;
+ uint32_t AvfsCksOffGbvTable;
+ uint32_t AvfsMeanNSigma;
+ uint32_t AvfsSclkOffsetTable;
+ uint32_t Reserved[16];
uint32_t Signature;
};
struct SMU_ClockStretcherDataTableEntry {
uint8_t minVID;
uint8_t maxVID;
-
-
uint16_t setting;
};
typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
typedef struct VFT_TABLE_t VFT_TABLE_t;
+/* Total margin, root mean square of Fmax + DC + Platform */
+struct AVFS_Margin_t {
+ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_Margin_t AVFS_Margin_t;
+
+#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2
+#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2
+
+struct GB_VDROOP_TABLE_t {
+ int32_t a0;
+ int32_t a1;
+ int32_t a2;
+ uint32_t spare;
+};
+typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t;
+
+struct AVFS_CksOff_Gbv_t {
+ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t;
+
+struct AVFS_meanNsigma_t {
+ uint32_t Aconstant[3];
+ uint16_t DC_tol_sigma;
+ uint16_t Platform_mean;
+ uint16_t Platform_sigma;
+ uint16_t PSM_Age_CompFactor;
+ uint8_t Static_Voltage_Offset[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t;
+
+struct AVFS_Sclk_Offset_t {
+ uint16_t Sclk_Offset[8];
+};
+typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t;
+
#endif
typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo;
+struct SMU_QuadraticCoeffs {
+ int32_t m1;
+ uint32_t b;
+
+ int16_t m2;
+ uint8_t m1_shift;
+ uint8_t m2_shift;
+};
+typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
+
struct SMU74_Discrete_DpmTable {
SMU74_PIDController GraphicsPIDController;
uint8_t ThermOutPolarity;
uint8_t ThermOutMode;
uint8_t BootPhases;
- uint32_t Reserved[4];
+
+ uint8_t VRHotLevel;
+ uint8_t LdoRefSel;
+ uint8_t Reserved1[2];
+ uint16_t FanStartTemperature;
+ uint16_t FanStopTemperature;
+ uint16_t MaxVoltage;
+ uint16_t Reserved2;
+ uint32_t Reserved[1];
SMU74_Discrete_GraphicsLevel GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS];
SMU74_Discrete_MemoryLevel MemoryACPILevel;
uint32_t CurrSclkPllRange;
sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE];
+ GB_VDROOP_TABLE_t BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES];
+ SMU_QuadraticCoeffs AVFSGB_VDROOP_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES];
};
typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable;
typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
-struct SMU_QuadraticCoeffs {
- int32_t m1;
- uint32_t b;
-
- int16_t m2;
- uint8_t m1_shift;
- uint8_t m2_shift;
-};
-typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
-
struct SMU74_Discrete_PmFuses {
uint8_t BapmVddCVidHiSidd[8];
uint8_t BapmVddCVidLoSidd[8];
#define DB_PCC_SHIFT 26
#define DB_EDC_SHIFT 27
+#define BTCGB0_Vdroop_Enable_MASK 0x1
+#define BTCGB1_Vdroop_Enable_MASK 0x2
+#define AVFSGB0_Vdroop_Enable_MASK 0x4
+#define AVFSGB1_Vdroop_Enable_MASK 0x8
+
+#define BTCGB0_Vdroop_Enable_SHIFT 0
+#define BTCGB1_Vdroop_Enable_SHIFT 1
+#define AVFSGB0_Vdroop_Enable_SHIFT 2
+#define AVFSGB1_Vdroop_Enable_SHIFT 3
+
#pragma pack(pop)
static int fiji_smu_fini(struct pp_smumgr *smumgr)
{
+ struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+
+ if (priv)
+ smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
if (smumgr->backend) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
/* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */
/* Voltage, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
- { 0x3c0fd047, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x30750000, 0, 0, 0, 0, 0, 0, 0 } },
- { 0xa00fd047, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x409c0000, 0, 0, 0, 0, 0, 0, 0 } },
- { 0x0410d047, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0, 0, 0x0e, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x50c30000, 0, 0, 0, 0, 0, 0, 0 } },
- { 0x6810d047, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x60ea0000, 0, 0, 0, 0, 0, 0, 0 } },
- { 0xcc10d047, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xe8fd0000, 0, 0, 0, 0, 0, 0, 0 } },
- { 0x3011d047, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x70110100, 0, 0, 0, 0, 0, 0, 0 } },
- { 0x9411d047, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xf8240100, 0, 0, 0, 0, 0, 0, 0 } },
- { 0xf811d047, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x80380100, 0, 0, 0, 0, 0, 0, 0 } }
+ { 0x100ea446, 0x00, 0x03, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x30750000, 0x3000, 0, 0x2600, 0, 0, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
+ { 0x400ea446, 0x01, 0x04, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x409c0000, 0x2000, 0, 0x1e00, 1, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
+ { 0x740ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x50c30000, 0x2800, 0, 0x2000, 1, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } },
+ { 0xa40ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x60ea0000, 0x3000, 0, 0x2600, 1, 1, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
+ { 0xd80ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x70110100, 0x3800, 0, 0x2c00, 1, 1, 0x0004, 0x1203, 0xffff, 0x3600, 0xc9e2, 0x2e00 } },
+ { 0x3c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x80380100, 0x2000, 0, 0x1e00, 2, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
+ { 0x6c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x905f0100, 0x2400, 0, 0x1e00, 2, 1, 0x0004, 0x8901, 0xffff, 0x2300, 0x314c, 0x1d00 } },
+ { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
};
static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 =
- {0x50140000, 0x50140000, 0x00320000, 0x00, 0x00,
- 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0000, 0x00, 0x00};
+ {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
/**
* Set the address for reading/writing the SMC SRAM space.
&& (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
}
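+/* Report whether hardware AVFS support is fused in: presumably bit 0 of
+ * SMU_EFUSE_0 word 49 (an assumption; the patch does not name the fuse). */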
+static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+{
+ uint32_t efuse;
+
+ efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+
+ return (efuse & 0x00000001) != 0;
+}
+
/**
* Send a message to the SMC, and wait for its response.
*
*/
int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
{
+ int ret;
+
if (!polaris10_is_smc_ram_running(smumgr))
return -1;
+
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
- printk("Failed to send Previous Message.\n");
+ ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+ if (ret != 1)
+ printk("\n failed to send pre message %x ret is %d \n", msg, ret);
cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
- printk("Failed to send Message.\n");
+ ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+ if (ret != 1)
+ printk("\n failed to send message %x ret is %d \n", msg, ret);
return 0;
}
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
(cgs_handle_t)smu_data->smu_buffer.handle);
return -1;);
+ if (polaris10_is_hw_avfs_present(smumgr))
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
+ else
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+
return 0;
}
int smum_fini(struct pp_smumgr *smumgr)
{
+ kfree(smumgr->device);
kfree(smumgr);
return 0;
}
static int tonga_smu_fini(struct pp_smumgr *smumgr)
{
+ struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
+
+ if (priv) {
+ smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
+ smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+ }
+
if (smumgr->backend != NULL) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
*
*/
+static void hdlcd_crtc_cleanup(struct drm_crtc *crtc)
+{
+ struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+
+ /* stop the controller on cleanup */
+ hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
+ drm_crtc_cleanup(crtc);
+}
+
static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
- .destroy = drm_crtc_cleanup,
+ .destroy = hdlcd_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
struct drm_display_mode *m = &crtc->state->adjusted_mode;
struct videomode vm;
- unsigned int polarities, line_length, err;
+ unsigned int polarities, err;
vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay;
vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end;
if (m->flags & DRM_MODE_FLAG_PVSYNC)
polarities |= HDLCD_POLARITY_VSYNC;
- line_length = crtc->primary->state->fb->pitches[0];
-
/* Allow max number of outstanding requests and largest burst size */
hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS,
HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
- hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length);
- hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length);
- hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1);
hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1);
hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1);
hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1);
hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1);
+ hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1);
hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1);
hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1);
- hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities);
err = hdlcd_set_pxl_fmt(crtc);
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
clk_prepare_enable(hdlcd->clk);
+ hdlcd_crtc_mode_set_nofb(crtc);
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
- drm_crtc_vblank_on(crtc);
}
static void hdlcd_crtc_disable(struct drm_crtc *crtc)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
- if (!crtc->primary->fb)
+ if (!crtc->state->active)
return;
- clk_disable_unprepare(hdlcd->clk);
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
- drm_crtc_vblank_off(crtc);
+ clk_disable_unprepare(hdlcd->clk);
}
static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
- unsigned long flags;
-
- if (crtc->state->event) {
- struct drm_pending_vblank_event *event = crtc->state->event;
+ struct drm_pending_vblank_event *event = crtc->state->event;
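+
+ /* If a vblank reference can be taken, arm the event to fire at the
+ * next vblank; otherwise deliver it immediately. */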
+ if (event) {
crtc->state->event = NULL;
- event->pipe = drm_crtc_index(crtc);
-
- WARN_ON(drm_crtc_vblank_get(crtc) != 0);
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- list_add_tail(&event->base.link, &hdlcd->event_list);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
}
}
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ u32 src_w, src_h;
+
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+
+ /* we can't do any scaling of the plane source */
+ if ((src_w != state->crtc_w) || (src_h != state->crtc_h))
+ return -EINVAL;
+
return 0;
}
{
struct hdlcd_drm_private *hdlcd;
struct drm_gem_cma_object *gem;
+ unsigned int depth, bpp;
+ u32 src_w, src_h, dest_w, dest_h;
dma_addr_t scanout_start;
- if (!plane->state->crtc || !plane->state->fb)
+ if (!plane->state->fb)
return;
- hdlcd = crtc_to_hdlcd_priv(plane->state->crtc);
+ drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp);
+ src_w = plane->state->src_w >> 16;
+ src_h = plane->state->src_h >> 16;
+ dest_w = plane->state->crtc_w;
+ dest_h = plane->state->crtc_h;
gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
- scanout_start = gem->paddr;
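+ /* Compute the DMA address of the (crtc_x, crtc_y) pixel within the
+ * framebuffer: GEM base + fb offset + y * pitch + x * bytes per pixel. */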
+ scanout_start = gem->paddr + plane->state->fb->offsets[0] +
+ plane->state->crtc_y * plane->state->fb->pitches[0] +
+ plane->state->crtc_x * bpp / 8;
+
+ hdlcd = plane->dev->dev_private;
+ hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);
+ hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]);
+ hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1);
hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start);
}
static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
- .prepare_fb = NULL,
- .cleanup_fb = NULL,
.atomic_check = hdlcd_plane_atomic_check,
.atomic_update = hdlcd_plane_atomic_update,
};
return plane;
}
-void hdlcd_crtc_suspend(struct drm_crtc *crtc)
-{
- hdlcd_crtc_disable(crtc);
-}
-
-void hdlcd_crtc_resume(struct drm_crtc *crtc)
-{
- hdlcd_crtc_enable(crtc);
-}
-
int hdlcd_setup_crtc(struct drm_device *drm)
{
struct hdlcd_drm_private *hdlcd = drm->dev_private;
atomic_set(&hdlcd->dma_end_count, 0);
#endif
- INIT_LIST_HEAD(&hdlcd->event_list);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdlcd->mmio = devm_ioremap_resource(drm->dev, res);
if (IS_ERR(hdlcd->mmio)) {
goto setup_fail;
}
- pm_runtime_enable(drm->dev);
-
- pm_runtime_get_sync(drm->dev);
ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
- pm_runtime_put_sync(drm->dev);
if (ret < 0) {
DRM_ERROR("failed to install IRQ handler\n");
goto irq_fail;
atomic_inc(&hdlcd->vsync_count);
#endif
- if (irq_status & HDLCD_INTERRUPT_VSYNC) {
- bool events_sent = false;
- unsigned long flags;
- struct drm_pending_vblank_event *e, *t;
-
+ if (irq_status & HDLCD_INTERRUPT_VSYNC)
drm_crtc_handle_vblank(&hdlcd->crtc);
- spin_lock_irqsave(&drm->event_lock, flags);
- list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) {
- list_del(&e->base.link);
- drm_crtc_send_vblank_event(&hdlcd->crtc, e);
- events_sent = true;
- }
- if (events_sent)
- drm_crtc_vblank_put(&hdlcd->crtc);
- spin_unlock_irqrestore(&drm->event_lock, flags);
- }
-
/* acknowledge interrupt(s) */
hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status);
static struct drm_info_list hdlcd_debugfs_list[] = {
{ "interrupt_count", hdlcd_show_underrun_count, 0 },
{ "clocks", hdlcd_show_pxlclock, 0 },
+ { "fb", drm_fb_cma_debugfs_show, 0 },
};
static int hdlcd_debugfs_init(struct drm_minor *minor)
return -ENOMEM;
drm->dev_private = hdlcd;
+ dev_set_drvdata(dev, drm);
+
hdlcd_setup_mode_config(drm);
ret = hdlcd_load(drm, 0);
if (ret)
if (ret)
goto err_unload;
- dev_set_drvdata(dev, drm);
-
ret = component_bind_all(dev, drm);
if (ret) {
DRM_ERROR("Failed to bind all components\n");
goto err_unregister;
}
+ ret = pm_runtime_set_active(dev);
+ if (ret)
+ goto err_pm_active;
+
+ pm_runtime_enable(dev);
+
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
drm_mode_config_cleanup(drm);
drm_vblank_cleanup(drm);
err_vblank:
+ pm_runtime_disable(drm->dev);
+err_pm_active:
component_unbind_all(dev, drm);
err_unregister:
drm_dev_unregister(drm);
err_unload:
- pm_runtime_get_sync(drm->dev);
drm_irq_uninstall(drm);
- pm_runtime_put_sync(drm->dev);
- pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
err_free:
+ dev_set_drvdata(dev, NULL);
drm_dev_unref(drm);
return ret;
static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct drm_crtc *crtc;
+ struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
- if (pm_runtime_suspended(dev))
+ if (!hdlcd)
return 0;
- drm_modeset_lock_all(drm);
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
- hdlcd_crtc_suspend(crtc);
- drm_modeset_unlock_all(drm);
+ drm_kms_helper_poll_disable(drm);
+
+ hdlcd->state = drm_atomic_helper_suspend(drm);
+ if (IS_ERR(hdlcd->state)) {
+ drm_kms_helper_poll_enable(drm);
+ return PTR_ERR(hdlcd->state);
+ }
+
return 0;
}
static int __maybe_unused hdlcd_pm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct drm_crtc *crtc;
+ struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
- if (!pm_runtime_suspended(dev))
+ if (!hdlcd)
return 0;
- drm_modeset_lock_all(drm);
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
- hdlcd_crtc_resume(crtc);
- drm_modeset_unlock_all(drm);
+ drm_atomic_helper_resume(drm, hdlcd->state);
+ drm_kms_helper_poll_enable(drm);
+ pm_runtime_set_active(dev);
+
return 0;
}
void __iomem *mmio;
struct clk *clk;
struct drm_fbdev_cma *fbdev;
- struct drm_framebuffer *fb;
- struct list_head event_list;
struct drm_crtc crtc;
struct drm_plane *plane;
+ struct drm_atomic_state *state;
#ifdef CONFIG_DEBUG_FS
atomic_t buffer_underrun_count;
atomic_t bus_error_count;
int hdlcd_setup_crtc(struct drm_device *dev);
void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd);
-void hdlcd_crtc_suspend(struct drm_crtc *crtc);
-void hdlcd_crtc_resume(struct drm_crtc *crtc);
#endif /* __HDLCD_DRV_H__ */
{
struct atmel_hlcdc_crtc_state *state;
- if (crtc->state && crtc->state->mode_blob)
- drm_property_unreference_blob(crtc->state->mode_blob);
-
if (crtc->state) {
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
kfree(state);
+ crtc->state = NULL;
}
state = kzalloc(sizeof(*state), GFP_KERNEL);
return NULL;
state = kmalloc(sizeof(*state), GFP_KERNEL);
- if (state)
- __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+ if (!state)
+ return NULL;
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
state->output_mode = cur->output_mode;
if (!ret)
ret = atmel_hlcdc_check_endpoint(dev, &ep);
- of_node_put(ep_np);
- if (ret)
+ if (ret) {
+ of_node_put(ep_np);
return ret;
+ }
}
for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
if (!ret)
ret = atmel_hlcdc_attach_endpoint(dev, &ep);
- of_node_put(ep_np);
- if (ret)
+ if (ret) {
+ of_node_put(ep_np);
return ret;
+ }
}
return 0;
atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
factor_reg);
+ } else {
+ atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
}
}
drm_property_unreference_blob(state->mode_blob);
state->mode_blob = NULL;
+ memset(&state->mode, 0, sizeof(state->mode));
+
if (blob) {
if (blob->length != sizeof(struct drm_mode_modeinfo) ||
drm_mode_convert_umode(&state->mode,
DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
state->mode.name, state);
} else {
- memset(&state->mode, 0, sizeof(state->mode));
state->enable = false;
DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
state);
*/
void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
+ unsigned crtc_mask = 0;
+ struct drm_crtc *crtc;
int ret;
+ bool global = false;
+
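+ /* Drop this state's acquire context from every CRTC (and, if installed
+ * globally, from mode_config) before backing off, then reattach it once
+ * all locks have been reacquired below. */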
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->acquire_ctx != state->acquire_ctx)
+ continue;
+
+ crtc_mask |= drm_crtc_mask(crtc);
+ crtc->acquire_ctx = NULL;
+ }
+
+ if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
+ global = true;
+
+ dev->mode_config.acquire_ctx = NULL;
+ }
retry:
drm_modeset_backoff(state->acquire_ctx);
- ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
+ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
if (ret)
goto retry;
+
+ drm_for_each_crtc(crtc, dev)
+ if (drm_crtc_mask(crtc) & crtc_mask)
+ crtc->acquire_ctx = state->acquire_ctx;
+
+ if (global)
+ dev->mode_config.acquire_ctx = state->acquire_ctx;
}
EXPORT_SYMBOL(drm_atomic_legacy_backoff);
goto out;
}
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-
/*
* Check whether the primary plane supports the fb pixel format.
* Drivers not implementing the universal planes API use a
if (value == 0)
return true;
- return _object_find(property->dev, value, property->values[0]) != NULL;
+ *ref = _object_find(property->dev, value, property->values[0]);
+ return *ref != NULL;
}
for (i = 0; i < property->num_values; i++)
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
- struct drm_crtc *new_crtc;
- struct drm_encoder *save_encoders, *new_encoder, *encoder;
+ struct drm_crtc **save_encoder_crtcs, *new_crtc;
+ struct drm_encoder **save_connector_encoders, *new_encoder, *encoder;
bool mode_changed = false; /* if true do a full mode set */
bool fb_changed = false; /* if true and !mode_changed just do a flip */
- struct drm_connector *save_connectors, *connector;
+ struct drm_connector *connector;
int count = 0, ro, fail = 0;
const struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_mode_set save_set;
- * Allocate space for the backup of all (non-pointer) encoder and
- * connector data.
+ * Allocate space for backing up the current encoder->crtc and
+ * connector->encoder pointers, which are all that needs restoring
+ * on failure.
*/
- save_encoders = kzalloc(dev->mode_config.num_encoder *
- sizeof(struct drm_encoder), GFP_KERNEL);
- if (!save_encoders)
+ save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder *
+ sizeof(struct drm_crtc *), GFP_KERNEL);
+ if (!save_encoder_crtcs)
return -ENOMEM;
- save_connectors = kzalloc(dev->mode_config.num_connector *
- sizeof(struct drm_connector), GFP_KERNEL);
- if (!save_connectors) {
- kfree(save_encoders);
+ save_connector_encoders = kzalloc(dev->mode_config.num_connector *
+ sizeof(struct drm_encoder *), GFP_KERNEL);
+ if (!save_connector_encoders) {
+ kfree(save_encoder_crtcs);
return -ENOMEM;
}
*/
count = 0;
drm_for_each_encoder(encoder, dev) {
- save_encoders[count++] = *encoder;
+ save_encoder_crtcs[count++] = encoder->crtc;
}
count = 0;
drm_for_each_connector(connector, dev) {
- save_connectors[count++] = *connector;
+ save_connector_encoders[count++] = connector->encoder;
}
save_set.crtc = set->crtc;
mode_changed = true;
}
- /* take a reference on all connectors in set */
+ /* take a reference on all unbound connectors in set, reuse the
+ * already taken reference for bound connectors
+ */
for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro]->encoder)
+ continue;
drm_connector_reference(set->connectors[ro]);
}
}
}
- /* after fail drop reference on all connectors in save set */
- count = 0;
- drm_for_each_connector(connector, dev) {
- drm_connector_unreference(&save_connectors[count++]);
- }
-
- kfree(save_connectors);
- kfree(save_encoders);
+ kfree(save_connector_encoders);
+ kfree(save_encoder_crtcs);
return 0;
fail:
/* Restore all previous data. */
count = 0;
drm_for_each_encoder(encoder, dev) {
- *encoder = save_encoders[count++];
+ encoder->crtc = save_encoder_crtcs[count++];
}
count = 0;
drm_for_each_connector(connector, dev) {
- *connector = save_connectors[count++];
+ connector->encoder = save_connector_encoders[count++];
}
- /* after fail drop reference on all connectors in set */
+ /* after fail drop reference on all unbound connectors in set, let
+ * bound connectors keep their reference
+ */
for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro]->encoder)
+ continue;
drm_connector_unreference(set->connectors[ro]);
}
save_set.y, save_set.fb))
DRM_ERROR("failed to restore config after modeset failure\n");
- kfree(save_connectors);
- kfree(save_encoders);
+ kfree(save_connector_encoders);
+ kfree(save_encoder_crtcs);
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
drm_dp_port_teardown_pdt(port, port->pdt);
if (!port->input && port->vcpi.vcpi > 0) {
- if (mgr->mst_state) {
- drm_dp_mst_reset_vcpi_slots(mgr, port);
- drm_dp_update_payload_part1(mgr);
- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
- }
+ drm_dp_mst_reset_vcpi_slots(mgr, port);
+ drm_dp_update_payload_part1(mgr);
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
}
kref_put(&port->kref, drm_dp_free_mst_port);
err_fb_info_destroy:
drm_fb_helper_release_fbi(helper);
err_gem_free_object:
- dev->driver->gem_free_object(&obj->base);
+ drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);
return cma_obj;
error:
- drm->driver->gem_free_object(&cma_obj->base);
+ drm_gem_object_unreference_unlocked(&cma_obj->base);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
* and handle has the id what user can see.
*/
ret = drm_gem_handle_create(file_priv, gem_obj, handle);
- if (ret)
- goto err_handle_create;
-
/* drop reference from allocate - handle holds it now. */
drm_gem_object_unreference_unlocked(gem_obj);
+ if (ret)
+ return ERR_PTR(ret);
return cma_obj;
-
-err_handle_create:
- drm->driver->gem_free_object(gem_obj);
-
- return ERR_PTR(ret);
}
/**
if (out->status != MODE_OK)
goto out;
+ drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
+
ret = 0;
out:
etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
+ etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
#include "exynos_drm_plane.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_fbdev.h"
#include "exynos_drm_iommu.h"
/*
struct exynos_dp_device {
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct drm_connector *connector;
struct drm_bridge *ptn_bridge;
struct drm_device *drm_dev;
struct device *dev;
static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data)
{
struct exynos_dp_device *dp = to_dp(plat_data);
- struct drm_connector *connector = &dp->connector;
+ struct drm_connector *connector = dp->connector;
struct drm_display_mode *mode;
int num_modes = 0;
int ret;
drm_connector_register(connector);
+ dp->connector = connector;
/* Pre-empt DP connector creation if there's a bridge */
if (dp->ptn_bridge) {
#include <drm/drmP.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
-#include "exynos_drm_fbdev.h"
static LIST_HEAD(exynos_drm_subdrv_list);
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
.timing_base = 0x0,
.has_clksel = 1,
.has_limited_fmt = 1,
- .has_hw_trigger = 1,
};
static struct fimd_driver_data exynos3_fimd_driver_data = {
.lcdblk_vt_shift = 24,
.lcdblk_bypass_shift = 15,
.lcdblk_mic_bypass_shift = 11,
- .trg_type = I80_HW_TRG,
.has_shadowcon = 1,
.has_vidoutcon = 1,
.has_vtsel = 1,
.has_mic_bypass = 1,
.has_dp_clk = 1,
- .has_hw_trigger = 1,
- .has_trigger_per_te = 1,
};
struct fimd_context {
/* registers for base address */
#define G2D_SRC_BASE_ADDR 0x0304
-#define G2D_SRC_STRIDE_REG 0x0308
+#define G2D_SRC_STRIDE 0x0308
#define G2D_SRC_COLOR_MODE 0x030C
#define G2D_SRC_LEFT_TOP 0x0310
#define G2D_SRC_RIGHT_BOTTOM 0x0314
#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
#define G2D_DST_BASE_ADDR 0x0404
-#define G2D_DST_STRIDE_REG 0x0408
+#define G2D_DST_STRIDE 0x0408
#define G2D_DST_COLOR_MODE 0x040C
#define G2D_DST_LEFT_TOP 0x0410
#define G2D_DST_RIGHT_BOTTOM 0x0414
switch (reg_offset) {
case G2D_SRC_BASE_ADDR:
- case G2D_SRC_STRIDE_REG:
+ case G2D_SRC_STRIDE:
case G2D_SRC_COLOR_MODE:
case G2D_SRC_LEFT_TOP:
case G2D_SRC_RIGHT_BOTTOM:
reg_type = REG_TYPE_SRC_PLANE2;
break;
case G2D_DST_BASE_ADDR:
- case G2D_DST_STRIDE_REG:
+ case G2D_DST_STRIDE:
case G2D_DST_COLOR_MODE:
case G2D_DST_LEFT_TOP:
case G2D_DST_RIGHT_BOTTOM:
} else
buf_info->types[reg_type] = BUF_TYPE_GEM;
break;
- case G2D_SRC_STRIDE_REG:
- case G2D_DST_STRIDE_REG:
+ case G2D_SRC_STRIDE:
+ case G2D_DST_STRIDE:
if (for_addr)
goto err;
state->v_ratio == (1 << 15))
height_ok = true;
- if (width_ok & height_ok)
+ if (width_ok && height_ok)
return 0;
DRM_DEBUG_KMS("scaling mode is not supported");
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_FLAT,
.volatile_reg = fsl_dcu_drm_is_volatile_reg,
+ .max_register = 0x11fc,
};
static int fsl_dcu_drm_irq_init(struct drm_device *dev)
task = get_pid_task(file->pid, PIDTYPE_PID);
if (!task) {
ret = -ESRCH;
- goto out_put;
+ goto out_unlock;
}
seq_printf(m, "\nproc: %s\n", task->comm);
put_task_struct(task);
idr_for_each(&file_priv->context_idr, per_file_ctx,
(void *)(unsigned long)m);
}
+out_unlock:
mutex_unlock(&dev->filelist_mutex);
-out_put:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
+ } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_KBP;
+ DRM_DEBUG_KMS("Found KabyPoint PCH\n");
+ WARN_ON(!IS_KABYLAKE(dev));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
+ PCH_KBP, /* Kabypoint PCH */
PCH_NOP,
};
#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+#define KBL_REVID_A0 0x0
+#define KBL_REVID_B0 0x1
+#define KBL_REVID_C0 0x2
+#define KBL_REVID_D0 0x3
+#define KBL_REVID_E0 0x4
+
+#define IS_KBL_REVID(p, since, until) \
+ (IS_KABYLAKE(p) && IS_REVID(p, since, until))
+
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
if (!mutex_is_locked(mutex))
return false;
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
return -ENODEV;
/* See the comment at the drm_mm_init() call for more about this check.
- * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
- if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
+ * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+ */
+ if (start < 4096 && (IS_GEN8(dev_priv) ||
+ IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
start = 4096;
mutex_lock(&dev_priv->mm.stolen_lock);
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_SPT(dev_priv))
+ if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
spt_irq_handler(dev, iir);
else
cpt_irq_handler(dev, iir);
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_BROXTON(dev))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev))
+ else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
+#define GEN8_CONFIG0 _MMIO(0xD00)
+#define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1)
+
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1<<13)
#define ECOBITS_PPGTT_CACHE64B (3<<8)
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
+#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
+
#if 0
#define PRB0_TAIL _MMIO(0x2030)
#define PRB0_HEAD _MMIO(0x2034)
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
+/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
+#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
+#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+
/* WaClearTdlStateAckDirtyBits */
#define GEN8_STATE_ACK _MMIO(0x20F0)
#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8)
#define ILK_DPFC_STATUS _MMIO(0x43210)
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
+#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
+#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1<<0)
#define SNB_FBC_FRONT_BUFFER (1<<1)
#define CHICKEN_PAR1_1 _MMIO(0x42080)
#define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14)
+#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
#define DISP_ARB_CTL _MMIO(0x45000)
+#define DISP_FBC_MEMORY_WAKE (1<<31)
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
#define DISP_ARB_CTL2 _MMIO(0x45004)
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define MASK_WAKEMEM (1<<13)
+
#define SKL_DFSM _MMIO(0x51000)
#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
+#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
/* GEN7 chicken */
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
#define HIZ_CHICKEN _MMIO(0x7018)
#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3)
#define GEN6_UCGCTL1 _MMIO(0x9400)
+# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22)
# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
#define GEN7_UCGCTL4 _MMIO(0x940c)
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14)
#define GEN6_RCGCTL1 _MMIO(0x9410)
#define GEN6_RCGCTL2 _MMIO(0x9414)
else
panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+ panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
+ dvo_timing->himage_lo;
+ panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
+ dvo_timing->vimage_lo;
+
/* Some VBTs have bogus h/vtotal values */
if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
}
if (bdb->version < 106) {
expected_size = 22;
- } else if (bdb->version < 109) {
+ } else if (bdb->version < 111) {
expected_size = 27;
} else if (bdb->version < 195) {
BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
return false;
}
+/**
+ * intel_bios_is_port_present - is the specified digital port present
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if the device in %port is present.
+ */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+{
+ static const struct {
+ u16 dp, hdmi;
+ } port_mapping[] = {
+ [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+ [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ };
+ int i;
+
+ /* FIXME maybe deal with port A as well? */
+ if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ const union child_device_config *p_child =
+ &dev_priv->vbt.child_dev[i];
+ if ((p_child->common.dvo_port == port_mapping[port].dp ||
+ p_child->common.dvo_port == port_mapping[port].hdmi) &&
+ (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+ return true;
+ }
+
+ return false;
+}
+
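A minimal caller sketch (editorial illustration, not part of the patch; it mirrors the VLV/CHV output-setup hunk later in this series, with PORT_B as an example value):

	bool has_port = intel_bios_is_port_present(dev_priv, PORT_B);

	/* register the port when either the strap or the VBT reports it */
	if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
		intel_dp_init(dev, VLV_DP_B, PORT_B);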
/**
* intel_bios_is_port_edp - is the device in given port eDP
* @dev_priv: i915 device instance
* be moved to FW_FAILED.
*/
+#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
+
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
+
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
-MODULE_FIRMWARE(I915_CSR_SKL);
-MODULE_FIRMWARE(I915_CSR_BXT);
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
-#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+
#define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
char substepping;
};
-/*
- * Kabylake derivated from Skylake H0, so SKL H0
- * is the right firmware for KBL A0 (revid 0).
- */
static const struct stepping_info kbl_stepping_info[] = {
- {'H', '0'}, {'I', '0'}
+ {'A', '0'}, {'B', '0'}, {'C', '0'},
+ {'D', '0'}, {'E', '0'}, {'F', '0'},
+ {'G', '0'}, {'H', '0'}, {'I', '0'},
};
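For context, a hedged sketch of how such a table is consumed (the helper below is an assumption modelled on the intel_csr.c lookup of this era, not quoted from this diff): the DMC loader indexes the table by PCI revision id, so KBL revid 0 now resolves to its own A0 stepping instead of borrowing SKL's H0.

	static const struct stepping_info *
	intel_get_stepping_info(struct drm_i915_private *dev_priv)
	{
		/* sketch: bounds-check the revid, then index the table */
		if (INTEL_REVID(dev_priv) < ARRAY_SIZE(kbl_stepping_info))
			return &kbl_stepping_info[INTEL_REVID(dev_priv)];
		return NULL;
	}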
static const struct stepping_info skl_stepping_info[] = {
csr->version = css_header->version;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_KABYLAKE(dev_priv)) {
+ required_min_version = KBL_CSR_VERSION_REQUIRED;
+ } else if (IS_SKYLAKE(dev_priv)) {
required_min_version = SKL_CSR_VERSION_REQUIRED;
} else if (IS_BROXTON(dev_priv)) {
required_min_version = BXT_CSR_VERSION_REQUIRED;
if (!HAS_CSR(dev_priv))
return;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv))
+ csr->fw_path = I915_CSR_KBL;
+ else if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
+ int i;
u32 val, final;
bool has_lvds = false;
bool has_cpu_edp = false;
bool has_panel = false;
bool has_ck505 = false;
bool can_ssc = false;
+ bool using_ssc_source = false;
/* We need to take the global config into account */
for_each_intel_encoder(dev, encoder) {
can_ssc = true;
}
- DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
- has_panel, has_lvds, has_ck505);
+ /* Check if any DPLLs are using the SSC source */
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ u32 temp = I915_READ(PCH_DPLL(i));
+
+ if (!(temp & DPLL_VCO_ENABLE))
+ continue;
+
+ if ((temp & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ using_ssc_source = true;
+ break;
+ }
+ }
+
+ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+ has_panel, has_lvds, has_ck505, using_ssc_source);
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- } else {
- final |= DREF_SSC_SOURCE_DISABLE;
- final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ } else if (using_ssc_source) {
+ final |= DREF_SSC_SOURCE_ENABLE;
+ final |= DREF_SSC1_ENABLE;
}
if (final == val)
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
} else {
- DRM_DEBUG_KMS("Disabling SSC entirely\n");
+ DRM_DEBUG_KMS("Disabling CPU source output\n");
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
- /* Turn off the SSC source */
- val &= ~DREF_SSC_SOURCE_MASK;
- val |= DREF_SSC_SOURCE_DISABLE;
+ if (!using_ssc_source) {
+ DRM_DEBUG_KMS("Disabling SSC source\n");
- /* Turn off SSC1 */
- val &= ~DREF_SSC1_ENABLE;
+ /* Turn off the SSC source */
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_DISABLE;
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
+ /* Turn off SSC1 */
+ val &= ~DREF_SSC1_ENABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, val);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ }
}
BUG_ON(val != final);
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
I915_WRITE(SOUTH_CHICKEN2, tmp);
- if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
DRM_ERROR("FDI mPHY reset assert timeout\n");
tmp = I915_READ(SOUTH_CHICKEN2);
tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
I915_WRITE(SOUTH_CHICKEN2, tmp);
- if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
val |= LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
- if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ if (wait_for_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
DRM_ERROR("Switching to FCLK failed\n");
val = I915_READ(LCPLL_CTL);
val &= ~LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
- if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ if (wait_for_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
DRM_ERROR("Switching back to LCPLL failed\n");
}
ret = intel_color_check(crtc, crtc_state);
if (ret)
return ret;
+
+ /*
+ * Changing color management on Intel hardware is
+ * handled as part of planes update.
+ */
+ crtc_state->planes_changed = true;
}
ret = 0;
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ bool has_edp, has_port;
+
/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However since eDP doesn't require DDC
* Thus we can't rely on the DP_DETECTED bit alone to detect
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
+ *
+ * Sadly the straps seem to be missing sometimes even for HDMI
+ * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
+ * and VBT for the presence of the port. Additionally we can't
+ * trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claims are DP or eDP.
*/
- if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_B))
+ has_edp = intel_dp_is_edp(dev, PORT_B);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+ if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+ if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
- if (I915_READ(VLV_DP_B) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_B))
- intel_dp_init(dev, VLV_DP_B, PORT_B);
- if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_C))
+ has_edp = intel_dp_is_edp(dev, PORT_C);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+ if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+ if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
- if (I915_READ(VLV_DP_C) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_C))
- intel_dp_init(dev, VLV_DP_C, PORT_C);
if (IS_CHERRYVIEW(dev)) {
- /* eDP not supported on port D, so don't check VBT */
- if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
- intel_hdmi_init(dev, CHV_HDMID, PORT_D);
- if (I915_READ(CHV_DP_D) & DP_DETECTED)
+ /*
+ * eDP not supported on port D,
+ * so no need to worry about it
+ */
+ has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+ if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
intel_dp_init(dev, CHV_DP_D, PORT_D);
+ if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+ intel_hdmi_init(dev, CHV_HDMID, PORT_D);
}
intel_dsi_init(dev);
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
msecs_to_jiffies_timeout(10));
else
- done = wait_for_atomic(C, 10) == 0;
+ done = wait_for(C, 10) == 0;
if (!done)
DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
has_aux_irq);
intel_dp->detect_done = false;
- if (intel_connector->detect_edid)
+ if (is_edp(intel_dp) || intel_connector->detect_edid)
return connector_status_connected;
else
return connector_status_disconnected;
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
- struct intel_dp *intel_dp;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (!HAS_DDI(dev_priv))
+ intel_dp->DP = I915_READ(intel_dp->output_reg);
if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;
- intel_dp = enc_to_intel_dp(encoder);
-
pps_lock(intel_dp);
/*
intel_display_power_get(dev_priv, power_domain);
if (long_hpd) {
- /* indicate that we need to restart link training */
- intel_dp->train_set_valid = false;
-
intel_dp_long_pulse(intel_dp->attached_connector);
if (intel_dp->is_mst)
ret = IRQ_HANDLED;
if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
fixed_mode = drm_mode_duplicate(dev,
dev_priv->vbt.lfp_lvds_vbt_mode);
- if (fixed_mode)
+ if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
+ }
}
mutex_unlock(&dev->mode_config.mutex);
return false;
}
-void
-intel_dp_init(struct drm_device *dev,
- i915_reg_t output_reg, enum port port)
+bool intel_dp_init(struct drm_device *dev,
+ i915_reg_t output_reg,
+ enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
- return;
+ return false;
intel_connector = intel_connector_alloc();
if (!intel_connector)
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
- return;
+ return true;
err_init_connector:
drm_encoder_cleanup(encoder);
kfree(intel_connector);
err_connector_alloc:
kfree(intel_dig_port);
-
- return;
+ return false;
}
void intel_dp_mst_suspend(struct drm_device *dev)
intel_dp_reset_link_train(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
{
- if (!intel_dp->train_set_valid)
- memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+ memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp);
return intel_dp_set_link_train(intel_dp, dp_train_pat);
}
break;
}
- /*
- * if we used previously trained voltage and pre-emphasis values
- * and we don't get clock recovery, reset link training values
- */
- if (intel_dp->train_set_valid) {
- DRM_DEBUG_KMS("clock recovery not ok, reset");
- /* clear the flag as we are not reusing train set */
- intel_dp->train_set_valid = false;
- if (!intel_dp_reset_link_train(intel_dp,
- DP_TRAINING_PATTERN_1 |
- DP_LINK_SCRAMBLING_DISABLE)) {
- DRM_ERROR("failed to enable link training\n");
- return;
- }
- continue;
- }
-
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
- intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp,
training_pattern |
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
- intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp,
training_pattern |
intel_dp_set_idle_link_train(intel_dp);
- if (channel_eq) {
- intel_dp->train_set_valid = true;
+ if (channel_eq)
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
- }
}
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
DPLL_ID_PCH_PLL_B);
}
+ if (!pll)
+ return NULL;
+
/* reference the pll */
intel_reference_shared_dpll(pll, crtc_state);
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
POSTING_READ(BXT_PORT_PLL_ENABLE(port));
- if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
- PORT_PLL_LOCK), 200))
+ if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
+ 200))
DRM_ERROR("PLL %d not locked\n", port);
/*
/* This is called before a link training is starterd */
void (*prepare_link_retrain)(struct intel_dp *intel_dp);
- bool train_set_valid;
-
/* Displayport compliance testing */
unsigned long compliance_test_type;
unsigned long compliance_test_data;
void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
+bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
goto err;
}
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
+
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_dsi_add_properties(intel_connector);
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
- bool enable_by_default = IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv);
+ bool enable_by_default = IS_BROADWELL(dev_priv);
if (intel_vgpu_active(dev_priv->dev)) {
fbc->no_fbc_reason = "VGPU is active";
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
+ DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
+ port_name(port));
+
if (WARN(intel_dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
uint32_t *const batch,
uint32_t index)
{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
- * WaDisableLSQCROPERFforOCL:skl
+ * WaDisableLSQCROPERFforOCL:skl,kbl
* This WA is implemented in skl_init_clock_gating() but since
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
- if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
+ IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
{
int ret;
struct drm_device *dev = engine->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaDisableCtxRestoreArbitration:skl,bxt */
return ret;
index = ret;
+ /* WaClearSlmSpaceAtContextSwitch:kbl */
+ /* Actual scratch location is at 128 bytes offset */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
+ uint32_t scratch_addr
+ = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+ wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+ wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ wa_ctx_emit(batch, index, scratch_addr);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ }
/* Pad to end of cacheline */
while (index % CACHELINE_DWORDS)
wa_ctx_emit(batch, index, MI_NOOP);
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *engine = ringbuf->engine;
u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
- bool vf_flush_wa = false;
+ bool vf_flush_wa = false, dc_flush_wa = false;
u32 flags = 0;
int ret;
+ int len;
flags |= PIPE_CONTROL_CS_STALL;
*/
if (IS_GEN9(engine->dev))
vf_flush_wa = true;
+
+ /* WaForGAMHang:kbl */
+ if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
+ dc_flush_wa = true;
}
- ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
+ len = 6;
+
+ if (vf_flush_wa)
+ len += 6;
+
+ if (dc_flush_wa)
+ len += 12;
+
+ ret = intel_ring_begin(request, len);
if (ret)
return ret;
intel_logical_ring_emit(ringbuf, 0);
}
+ if (dc_flush_wa) {
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+ intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ }
+
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf, flags);
intel_logical_ring_emit(ringbuf, scratch_addr);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
+
+ if (dc_flush_wa) {
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+ intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ }
+
intel_logical_ring_advance(ringbuf);
return 0;
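Worked dword accounting for the length computed above (editorial note, not patch content): the main flush is one 6-dword PIPE_CONTROL, the GEN9 VF workaround prepends another 6-dword PIPE_CONTROL, and the KBL DC-flush workaround brackets the main flush with two more, so len = 6 + 6 + (2 * 6) = 24 when both workarounds apply.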
fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
goto out;
}
}
return -ENODEV;
}
+ /*
+ * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
+ * low vswing for eDP, whereas the VBT panel type (2) gives us normal
+ * vswing instead. Low vswing results in some display flickers, so
+ * let's simply ignore the OpRegion panel type on SKL for now.
+ */
+ if (IS_SKYLAKE(dev)) {
+ DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+ return -ENODEV;
+ }
+
return ret - 1;
}
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
- } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) {
+ } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv)) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight;
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
+static void gen9_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+
+ I915_WRITE(GEN8_CONFIG0,
+ I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
+
+ /* WaEnableChickenDCPR:skl,bxt,kbl */
+ I915_WRITE(GEN8_CHICKEN_DCPR_1,
+ I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+
+ /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
+ /* WaFbcWakeMemOn:skl,bxt,kbl */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS |
+ DISP_FBC_MEMORY_WAKE);
+
+ /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_DISABLE_DUMMY0);
+}
+
static void bxt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ gen9_init_clock_gating(dev);
+
/* WaDisableSDEUnitClockGating:bxt */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
}
}
+static void kabylake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_clock_gating(dev);
+
+ /* WaDisableSDEUnitClockGating:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableGamClockGating:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaFbcNukeOnHostModify:kbl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
+static void skylake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_clock_gating(dev);
+
+ /* WaFbcNukeOnHostModify:skl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
static void broadwell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
if (IS_SKYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ dev_priv->display.init_clock_gating = skylake_init_clock_gating;
else if (IS_KABYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
else if (IS_BROXTON(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;
else if (IS_BROADWELL(dev_priv))
{
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t tmp;
int ret;
- /* WaEnableLbsSlaRetryTimerDecrement:skl */
+ /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
+ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
- /* WaDisableKillLogic:bxt,skl */
+ /* WaDisableKillLogic:bxt,skl,kbl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
- /* WaClearFlowControlGpgpuContextSave:skl,bxt */
- /* WaDisablePartialInstShootdown:skl,bxt */
+ /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
+ /* WaDisablePartialInstShootdown:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
- /* Syncing dependencies between camera and graphics:skl,bxt */
+ /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
*/
}
- /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
- /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
+ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX |
GEN9_ENABLE_GPGPU_PREEMPTION);
- /* Wa4x4STCOptimizationDisable:skl,bxt */
- /* WaDisablePartialResolveInVc:skl,bxt */
+ /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
+ /* WaDisablePartialResolveInVc:skl,bxt,kbl */
WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
- /* WaCcsTlbPrefetchDisable:skl,bxt */
+ /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
- /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
- tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
- if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
- IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
- tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
- WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+ /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+ /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+ * both tied to WaForceContextSaveRestoreNonCoherent
+ * in some hsds for skl. We keep the tie for all gen9. The
+ * documentation is a bit hazy and so we want to get common behaviour,
+ * even though there is no clear evidence we would need both on kbl/bxt.
+ * This area has been source of system hangs so we play it safe
+ * and mimic the skl regardless of what bspec says.
+ *
+ * Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
- /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
- if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+ /* WaForceEnableNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+
+ /* WaDisableHDCInvalidation:skl,bxt,kbl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
+
+ /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
- /* WaDisableSTUnitPowerOptimization:skl,bxt */
+ /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
- /* WaOCLCoherentLineFlush:skl,bxt */
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl */
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
+ /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
+ ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+ if (ret)
+ return ret;
+
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
if (ret)
return ret;
- /* WaAllowUMDToModifyHDCChicken1:skl,bxt */
+ /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
if (ret)
return ret;
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
- /* This is tied to WaForceContextSaveRestoreNonCoherent */
- if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
- /*
- *Use Force Non-Coherent whenever executing a 3D context. This
- * is a workaround for a possible hang in the unlikely event
- * a TLB invalidation occurs during a PSD flush.
- */
- /* WaForceEnableNonCoherent:skl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
-
- /* WaDisableHDCInvalidation:skl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
- }
-
/* WaBarrierPerformanceFixDisable:skl */
if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ /* WaDisableGafsUnitClkGating:skl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
/* WaDisableLSQCROPERFforOCL:skl */
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
return ret;
}
+ /* WaInsertDummyPushConstPs:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ return 0;
+}
+
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaEnableGapsTsvCreditFix:kbl */
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+ /* WaDisableDynamicCreditSharing:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ WA_SET_BIT(GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+ /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE);
+
+ /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
+ * involving this register should also be added to WA batch as required.
+ */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+ GEN8_LQSC_RO_PERF_DIS);
+
+ /* WaInsertDummyPushConstPs:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableGafsUnitClkGating:kbl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSbeCacheDispatchPortSharing:kbl */
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+
return 0;
}
if (IS_BROXTON(dev))
return bxt_init_workarounds(engine);
+ if (IS_KABYLAKE(dev_priv))
+ return kbl_init_workarounds(engine);
+
return 0;
}
u8 vsync_off:4;
u8 rsvd0:6;
u8 hsync_off_hi:2;
- u8 h_image;
- u8 v_image;
- u8 max_hv;
+ u8 himage_lo;
+ u8 vimage_lo;
+ u8 vimage_hi:4;
+ u8 himage_hi:4;
u8 h_border;
u8 v_border;
u8 rsvd1:3;
return NULL;
}
-int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format,
- int hsync_pin, int vsync_pin)
+int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
+ int hsync_pin, int vsync_pin, u32 bus_flags)
{
struct imx_drm_crtc_helper_funcs *helper;
struct imx_drm_crtc *imx_crtc;
helper = &imx_crtc->imx_drm_helper_funcs;
if (helper->set_interface_pix_fmt)
return helper->set_interface_pix_fmt(encoder->crtc,
- bus_format, hsync_pin, vsync_pin);
+ bus_format, hsync_pin, vsync_pin,
+ bus_flags);
return 0;
}
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins);
+EXPORT_SYMBOL_GPL(imx_drm_set_bus_config);
int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
{
- return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3);
+ return imx_drm_set_bus_config(encoder, bus_format, 2, 3,
+ DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE);
}
EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
- u32 bus_format, int hsync_pin, int vsync_pin);
+ u32 bus_format, int hsync_pin, int vsync_pin,
+ u32 bus_flags);
const struct drm_crtc_helper_funcs *crtc_helper_funcs;
const struct drm_crtc_funcs *crtc_funcs;
};
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
-int imx_drm_set_bus_format_pins(struct drm_encoder *encoder,
- u32 bus_format, int hsync_pin, int vsync_pin);
+int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
+ int hsync_pin, int vsync_pin, u32 bus_flags);
int imx_drm_set_bus_format(struct drm_encoder *encoder,
u32 bus_format);
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>
struct drm_encoder encoder;
struct drm_panel *panel;
struct device_node *child;
+ struct i2c_adapter *ddc;
int chno;
void *edid;
int edid_len;
return num_modes;
}
+ if (!imx_ldb_ch->edid && imx_ldb_ch->ddc)
+ imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
+
if (imx_ldb_ch->edid) {
drm_mode_connector_update_edid_property(connector,
imx_ldb_ch->edid);
for_each_child_of_node(np, child) {
struct imx_ldb_channel *channel;
- struct device_node *port;
+ struct device_node *ddc_node;
+ struct device_node *ep;
ret = of_property_read_u32(child, "reg", &i);
if (ret || i < 0 || i > 1)
* The output port is port@4 with an external 4-port mux or
* port@2 with the internal 2-port mux.
*/
- port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2);
- if (port) {
- struct device_node *endpoint, *remote;
-
- endpoint = of_get_child_by_name(port, "endpoint");
- if (endpoint) {
- remote = of_graph_get_remote_port_parent(endpoint);
- if (remote)
- channel->panel = of_drm_find_panel(remote);
- else
- return -EPROBE_DEFER;
- if (!channel->panel) {
- dev_err(dev, "panel not found: %s\n",
- remote->full_name);
- return -EPROBE_DEFER;
- }
+ ep = of_graph_get_endpoint_by_regs(child,
+ imx_ldb->lvds_mux ? 4 : 2,
+ -1);
+ if (ep) {
+ struct device_node *remote;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (remote)
+ channel->panel = of_drm_find_panel(remote);
+ else
+ return -EPROBE_DEFER;
+ of_node_put(remote);
+ if (!channel->panel) {
+ dev_err(dev, "panel not found: %s\n",
+ remote->full_name);
+ return -EPROBE_DEFER;
}
}
- edidp = of_get_property(child, "edid", &channel->edid_len);
- if (edidp) {
- channel->edid = kmemdup(edidp, channel->edid_len,
- GFP_KERNEL);
- } else if (!channel->panel) {
- ret = of_get_drm_display_mode(child, &channel->mode, 0);
- if (!ret)
- channel->mode_valid = 1;
+ ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
+ if (ddc_node) {
+ channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ of_node_put(ddc_node);
+ if (!channel->ddc) {
+ dev_warn(dev, "failed to get ddc i2c adapter\n");
+ return -EPROBE_DEFER;
+ }
+ }
+
+ if (!channel->ddc) {
+ /* if no DDC available, fallback to hardcoded EDID */
+ dev_dbg(dev, "no ddc available\n");
+
+ edidp = of_get_property(child, "edid",
+ &channel->edid_len);
+ if (edidp) {
+ channel->edid = kmemdup(edidp,
+ channel->edid_len,
+ GFP_KERNEL);
+ } else if (!channel->panel) {
+ /* fallback to display-timings node */
+ ret = of_get_drm_display_mode(child,
+ &channel->mode,
+ OF_USE_NATIVE_MODE);
+ if (!ret)
+ channel->mode_valid = 1;
+ }
}
channel->bus_format = of_get_bus_format(dev, child);
channel->encoder.funcs->destroy(&channel->encoder);
kfree(channel->edid);
+ i2c_put_adapter(channel->ddc);
}
}
switch (tve->mode) {
case TVE_MODE_VGA:
- imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24,
- tve->hsync_pin, tve->vsync_pin);
+ imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24,
+ tve->hsync_pin, tve->vsync_pin,
+ DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE);
break;
case TVE_MODE_TVOUT:
imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
struct ipu_flip_work *flip_work;
int irq;
u32 bus_format;
+ u32 bus_flags;
int di_hsync_pin;
int di_vsync_pin;
};
else
sig_cfg.clkflags = 0;
- sig_cfg.enable_pol = 1;
- sig_cfg.clk_pol = 0;
+ sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW);
+ /* Default to driving pixel data on negative clock edges */
+ sig_cfg.clk_pol = !!(ipu_crtc->bus_flags &
+ DRM_BUS_FLAG_PIXDATA_POSEDGE);
sig_cfg.bus_format = ipu_crtc->bus_format;
sig_cfg.v_to_h_sync = 0;
sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
}
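Worked example for the polarity mapping above (illustrative values only): a panel with active-low data-enable that latches pixels on the rising clock edge would pass bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_POSEDGE, giving enable_pol = 0 and clk_pol = 1; with no flags set, the expressions reduce to the previously hardcoded enable_pol = 1, clk_pol = 0.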
static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
- u32 bus_format, int hsync_pin, int vsync_pin)
+ u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
ipu_crtc->bus_format = bus_format;
+ ipu_crtc->bus_flags = bus_flags;
ipu_crtc->di_hsync_pin = hsync_pin;
ipu_crtc->di_vsync_pin = vsync_pin;
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_YUV420,
if (crtc != plane->crtc)
dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
plane->crtc, crtc);
- plane->crtc = crtc;
if (!ipu_plane->enabled)
ipu_plane_enable(ipu_plane);
kfree(ipu_plane);
}
-static struct drm_plane_funcs ipu_plane_funcs = {
+static const struct drm_plane_funcs ipu_plane_funcs = {
.update_plane = ipu_update_plane,
.disable_plane = ipu_disable_plane,
.destroy = ipu_plane_destroy,
void *edid;
int edid_len;
u32 bus_format;
- int mode_valid;
struct drm_display_mode mode;
struct drm_panel *panel;
};
num_modes = drm_add_edid_modes(connector, imxpd->edid);
}
- if (imxpd->mode_valid) {
- struct drm_display_mode *mode = drm_mode_create(connector->dev);
-
- if (!mode)
- return -EINVAL;
- drm_mode_copy(mode, &imxpd->mode);
- mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
- drm_mode_probed_add(connector, mode);
- num_modes++;
- }
-
if (np) {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
{
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
-
- imx_drm_set_bus_format(encoder, imxpd->bus_format);
+ imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3,
+ imxpd->connector.display_info.bus_flags);
}
static void imx_pd_encoder_commit(struct drm_encoder *encoder)
{
struct drm_device *drm = data;
struct device_node *np = dev->of_node;
- struct device_node *port;
+ struct device_node *ep;
const u8 *edidp;
struct imx_parallel_display *imxpd;
int ret;
}
/* port@1 is the output port */
- port = of_graph_get_port_by_id(np, 1);
- if (port) {
- struct device_node *endpoint, *remote;
-
- endpoint = of_get_child_by_name(port, "endpoint");
- if (endpoint) {
- remote = of_graph_get_remote_port_parent(endpoint);
- if (remote)
- imxpd->panel = of_drm_find_panel(remote);
- if (!imxpd->panel)
- return -EPROBE_DEFER;
+ ep = of_graph_get_endpoint_by_regs(np, 1, -1);
+ if (ep) {
+ struct device_node *remote;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (remote) {
+ imxpd->panel = of_drm_find_panel(remote);
+ of_node_put(remote);
}
+ if (!imxpd->panel)
+ return -EPROBE_DEFER;
}
imxpd->dev = dev;
unsigned long pll_rate;
unsigned int factor;
- if (!dpi) {
- dev_err(dpi->dev, "invalid argument\n");
- return -EINVAL;
- }
-
pix_rate = 1000UL * mode->clock;
if (mode->clock <= 74000)
factor = 8 * 3;
{
drm_encoder_cleanup(&dsi->encoder);
/* Skip connector cleanup if creation was delegated to the bridge */
- if (dsi->conn.dev) {
- drm_connector_unregister(&dsi->conn);
+ if (dsi->conn.dev)
drm_connector_cleanup(&dsi->conn);
- }
}
static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
}
}
- fvv = pllreffreq * testn / testm;
+ fvv = pllreffreq * (n + 1) / (m + 1);
fvv = (fvv - 800000) / 50000;
if (fvv > 15)
WREG_DAC(MGA1064_PIX_PLLC_M, m);
WREG_DAC(MGA1064_PIX_PLLC_N, n);
WREG_DAC(MGA1064_PIX_PLLC_P, p);
+
+ if (mdev->unique_rev_id >= 0x04) {
+ WREG_DAC(0x1a, 0x09);
+ msleep(20);
+ WREG_DAC(0x1a, 0x01);
+
+ }
+
return 0;
}
}
adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
- if (!adreno_gpu->memptrs) {
+ if (IS_ERR(adreno_gpu->memptrs)) {
dev_err(drm->dev, "could not vmap memptrs\n");
return -ENOMEM;
}
dev->mode_config.fb_base = paddr;
fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+ if (IS_ERR(fbi->screen_base)) {
+ ret = PTR_ERR(fbi->screen_base);
+ goto fail_unlock;
+ }
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = paddr;
fbi->fix.smem_len = fbdev->bo->size;
return ERR_CAST(pages);
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ if (msm_obj->vaddr == NULL)
+ return ERR_PTR(-ENOMEM);
}
return msm_obj->vaddr;
}
submit->dev = dev;
submit->gpu = gpu;
+ submit->fence = NULL;
submit->pid = get_pid(task_pid(current));
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
submit->nr_cmds = 0;
+ INIT_LIST_HEAD(&submit->node);
INIT_LIST_HEAD(&submit->bo_list);
ww_acquire_init(&submit->ticket, &reservation_ww_class);
void __user *userptr =
u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+ /* make sure we don't have garbage flags, in case we hit
+ * error path before flags is initialized:
+ */
+ submit->bos[i].flags = 0;
+
ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
if (ret) {
ret = -EFAULT;
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf = msm_gem_vaddr_locked(&obj->base);
+ if (IS_ERR(buf))
+ continue;
+
buf += iova - submit->bos[idx].iova;
rd_write_section(rd, RD_GPUADDR,
}
ring->start = msm_gem_vaddr_locked(ring->bo);
+ if (IS_ERR(ring->start)) {
+ ret = PTR_ERR(ring->start);
+ goto fail;
+ }
ring->end = ring->start + (size / 4);
ring->cur = ring->start;
NVKM_SUBDEV_MC,
NVKM_SUBDEV_BUS,
NVKM_SUBDEV_TIMER,
+ NVKM_SUBDEV_INSTMEM,
NVKM_SUBDEV_FB,
NVKM_SUBDEV_LTC,
- NVKM_SUBDEV_INSTMEM,
NVKM_SUBDEV_MMU,
NVKM_SUBDEV_BAR,
NVKM_SUBDEV_PMU,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
struct nvbios_ocfg {
- u16 match;
+ u8 proto;
+ u8 flags;
u16 clkcmp[2];
};
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
-u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
+u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
#endif
if (ret)
goto fini;
+ if (fbcon->helper.fbdev)
+ fbcon->helper.fbdev->pixmap.buf_align = 4;
return 0;
fini:
uint32_t fg;
uint32_t bg;
uint32_t dsize;
- uint32_t width;
uint32_t *data = (uint32_t *)image->data;
int ret;
if (ret)
return ret;
- width = ALIGN(image->width, 8);
- dsize = ALIGN(width * image->height, 32) >> 5;
-
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
((image->dx + image->width) & 0xffff));
OUT_RING(chan, bg);
OUT_RING(chan, fg);
- OUT_RING(chan, (image->height << 16) | width);
+ OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+ dsize = ALIGN(image->width * image->height, 32) >> 5;
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;
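Worked example for the dsize computation above (editorial, not patch content): an 8x16 1bpp glyph carries 8 * 16 = 128 bits, and ALIGN(128, 32) >> 5 = 4 dwords; a 12x22 glyph carries 264 bits, padded up to 288, so 9 dwords are streamed to the GDI object.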
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
- uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
if (ret)
return ret;
- width = ALIGN(image->width, 32);
- dwords = (width * image->height) >> 5;
-
BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
OUT_RING(chan, 0);
OUT_RING(chan, image->dy);
+ dwords = ALIGN(image->width * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
- uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
if (ret)
return ret;
- width = ALIGN(image->width, 32);
- dwords = (width * image->height) >> 5;
-
BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
OUT_RING (chan, 0);
OUT_RING (chan, image->dy);
+ dwords = ALIGN(image->width * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
.fini = nvkm_device_pci_fini,
.resource_addr = nvkm_device_pci_resource_addr,
.resource_size = nvkm_device_pci_resource_size,
- .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64),
+ .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
};
int
nvkm-y += nvkm/engine/disp/sornv50.o
nvkm-y += nvkm/engine/disp/sorg94.o
nvkm-y += nvkm/engine/disp/sorgf119.o
+nvkm-y += nvkm/engine/disp/sorgm107.o
nvkm-y += nvkm/engine/disp/sorgm200.o
nvkm-y += nvkm/engine/disp/dport.o
mask |= 0x0001 << or;
mask |= 0x0100 << head;
+
list_for_each_entry(outp, &disp->base.outp, head) {
if ((outp->info.hasht & 0xff) == type &&
(outp->info.hashm & mask) == mask) {
if (!outp)
return NULL;
+ *conf = (ctrl & 0x00000f00) >> 8;
switch (outp->info.type) {
case DCB_OUTPUT_TMDS:
- *conf = (ctrl & 0x00000f00) >> 8;
if (*conf == 5)
*conf |= 0x0100;
break;
case DCB_OUTPUT_LVDS:
- *conf = disp->sor.lvdsconf;
- break;
- case DCB_OUTPUT_DP:
- *conf = (ctrl & 0x00000f00) >> 8;
+ *conf |= disp->sor.lvdsconf;
break;
- case DCB_OUTPUT_ANALOG:
default:
- *conf = 0x00ff;
break;
}
- data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+ data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+ &ver, &hdr, &cnt, &len, &info2);
if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
.outp.internal.crt = nv50_dac_output_new,
.outp.internal.tmds = nv50_sor_output_new,
.outp.internal.lvds = nv50_sor_output_new,
- .outp.internal.dp = gf119_sor_dp_new,
+ .outp.internal.dp = gm107_sor_dp_new,
.dac.nr = 3,
.dac.power = nv50_dac_power,
.dac.sense = nv50_dac_sense,
if (!outp)
return NULL;
+ *conf = (ctrl & 0x00000f00) >> 8;
if (outp->info.location == 0) {
switch (outp->info.type) {
case DCB_OUTPUT_TMDS:
- *conf = (ctrl & 0x00000f00) >> 8;
if (*conf == 5)
*conf |= 0x0100;
break;
case DCB_OUTPUT_LVDS:
- *conf = disp->sor.lvdsconf;
+ *conf |= disp->sor.lvdsconf;
break;
- case DCB_OUTPUT_DP:
- *conf = (ctrl & 0x00000f00) >> 8;
- break;
- case DCB_OUTPUT_ANALOG:
default:
- *conf = 0x00ff;
break;
}
} else {
pclk = pclk / 2;
}
- data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+ data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+ &ver, &hdr, &cnt, &len, &info2);
if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
-int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
- struct nvkm_output **);
+int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
+int gm107_sor_dp_pattern(struct nvkm_output_dp *, int);
+
+int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
#endif
gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
struct nvkm_device *device = outp->base.disp->engine.subdev.device;
- const u32 loff = gf119_sor_loff(outp);
- nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+ const u32 soff = gf119_sor_soff(outp);
+ nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
return 0;
}
return 0;
}
-static int
+int
gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
int ln, int vs, int pe, int pc)
{
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "outpdp.h"
+
+int
+gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+ const u32 soff = outp->base.or * 0x800;
+ const u32 data = 0x01010101 * pattern;
+ if (outp->base.info.sorconf.link & 1)
+ nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
+ else
+ nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
+ return 0;
+}
+
+static const struct nvkm_output_dp_func
+gm107_sor_dp_func = {
+ .pattern = gm107_sor_dp_pattern,
+ .lnk_pwr = g94_sor_dp_lnk_pwr,
+ .lnk_ctl = gf119_sor_dp_lnk_ctl,
+ .drv_ctl = gf119_sor_dp_drv_ctl,
+};
+
+int
+gm107_sor_dp_new(struct nvkm_disp *disp, int index,
+ struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+ return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp);
+}
return lane * 0x08;
}
-static int
-gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
-{
- struct nvkm_device *device = outp->base.disp->engine.subdev.device;
- const u32 soff = gm200_sor_soff(outp);
- const u32 data = 0x01010101 * pattern;
- if (outp->base.info.sorconf.link & 1)
- nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
- else
- nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
- return 0;
-}
-
static int
gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
{
static const struct nvkm_output_dp_func
gm200_sor_dp_func = {
- .pattern = gm200_sor_dp_pattern,
+ .pattern = gm107_sor_dp_pattern,
.lnk_pwr = gm200_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gm200_sor_dp_drv_ctl,
}
static const struct nvkm_enum gf100_mp_warp_error[] = {
- { 0x00, "NO_ERROR" },
- { 0x01, "STACK_MISMATCH" },
+ { 0x01, "STACK_ERROR" },
+ { 0x02, "API_STACK_ERROR" },
+ { 0x03, "RET_EMPTY_STACK_ERROR" },
+ { 0x04, "PC_WRAP" },
{ 0x05, "MISALIGNED_PC" },
- { 0x08, "MISALIGNED_GPR" },
- { 0x09, "INVALID_OPCODE" },
- { 0x0d, "GPR_OUT_OF_BOUNDS" },
- { 0x0e, "MEM_OUT_OF_BOUNDS" },
- { 0x0f, "UNALIGNED_MEM_ACCESS" },
+ { 0x06, "PC_OVERFLOW" },
+ { 0x07, "MISALIGNED_IMMC_ADDR" },
+ { 0x08, "MISALIGNED_REG" },
+ { 0x09, "ILLEGAL_INSTR_ENCODING" },
+ { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
+ { 0x0b, "ILLEGAL_INSTR_PARAM" },
+ { 0x0c, "INVALID_CONST_ADDR" },
+ { 0x0d, "OOR_REG" },
+ { 0x0e, "OOR_ADDR" },
+ { 0x0f, "MISALIGNED_ADDR" },
{ 0x10, "INVALID_ADDR_SPACE" },
- { 0x11, "INVALID_PARAM" },
+ { 0x11, "ILLEGAL_INSTR_PARAM2" },
+ { 0x12, "INVALID_CONST_ADDR_LDC" },
+ { 0x13, "GEOMETRY_SM_ERROR" },
+ { 0x14, "DIVERGENT" },
+ { 0x15, "WARP_EXIT" },
{}
};
static const struct nvkm_bitfield gf100_mp_global_error[] = {
+ { 0x00000001, "SM_TO_SM_FAULT" },
+ { 0x00000002, "L1_ERROR" },
{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
- { 0x00000008, "OUT_OF_STACK_SPACE" },
+ { 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
+ { 0x00000010, "BPT_INT" },
+ { 0x00000020, "BPT_PAUSE" },
+ { 0x00000040, "SINGLE_STEP_COMPLETE" },
+ { 0x20000000, "ECC_SEC_ERROR" },
+ { 0x40000000, "ECC_DED_ERROR" },
+ { 0x80000000, "TIMEOUT" },
{}
};
{
u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
if (data) {
- info->match = nvbios_rd16(bios, data + 0x00);
+ info->proto = nvbios_rd08(bios, data + 0x00);
+ info->flags = nvbios_rd16(bios, data + 0x01);
info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
}
}
u16
-nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type,
+nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
{
u16 data, idx = 0;
while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
- if (info->match == type)
+ if ((info->proto == proto || info->proto == 0xff) &&
+ (info->flags == flags))
break;
}
return data;
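An illustrative match against hypothetical table values: with the proto/flags split above, an entry parsed as {proto = 0xff, flags = 0x00} acts as a proto wildcard and matches a lookup for proto 0x02 / flags 0x00, whereas {proto = 0x02, flags = 0x01} matches only when both fields agree; the removed code compared the two bytes as a single u16 match value.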
struct pwr_rail_t *r = &stbl.rail[i];
struct nvkm_iccsense_rail *rail;
struct nvkm_iccsense_sensor *sensor;
+ int (*read)(struct nvkm_iccsense *,
+ struct nvkm_iccsense_rail *);
if (!r->mode || r->resistor_mohm == 0)
continue;
if (!sensor)
continue;
- rail = kmalloc(sizeof(*rail), GFP_KERNEL);
- if (!rail)
- return -ENOMEM;
-
switch (sensor->type) {
case NVBIOS_EXTDEV_INA209:
if (r->rail != 0)
continue;
- rail->read = nvkm_iccsense_ina209_read;
+ read = nvkm_iccsense_ina209_read;
break;
case NVBIOS_EXTDEV_INA219:
if (r->rail != 0)
continue;
- rail->read = nvkm_iccsense_ina219_read;
+ read = nvkm_iccsense_ina219_read;
break;
case NVBIOS_EXTDEV_INA3221:
if (r->rail >= 3)
continue;
- rail->read = nvkm_iccsense_ina3221_read;
+ read = nvkm_iccsense_ina3221_read;
break;
default:
continue;
}
+ rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+ if (!rail)
+ return -ENOMEM;
sensor->rail_mask |= 1 << r->rail;
+ rail->read = read;
rail->sensor = sensor;
rail->idx = r->rail;
rail->mohm = r->resistor_mohm;
}
static void
-gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
+gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
{
struct nvkm_subdev *subdev = <c->subdev;
struct nvkm_device *device = subdev->device;
- u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
+ u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
u32 stat = nvkm_rd32(device, base + 0x00c);
if (stat) {
while (mask) {
u32 s, c = __ffs(mask);
for (s = 0; s < ltc->lts_nr; s++)
- gm107_ltc_lts_isr(ltc, c, s);
+ gm107_ltc_intr_lts(ltc, c, s);
mask &= ~(1 << c);
}
}
gm200_ltc = {
.oneinit = gm200_ltc_oneinit,
.init = gm200_ltc_init,
- .intr = gm107_ltc_intr, /*XXX: not validated */
+ .intr = gm107_ltc_intr,
.cbc_clear = gm107_ltc_cbc_clear,
.cbc_wait = gm107_ltc_cbc_wait,
.zbc = 16,
tristate "OMAP DRM"
depends on DRM
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
+ select OMAP2_DSS
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
* the Free Software Foundation.
*/
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
* the Free Software Foundation.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
* the Free Software Foundation.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
* the Free Software Foundation.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <video/omapdss.h>
#include <video/omap-panel-data.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_gpio.h>
#include <video/omapdss.h>
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/sched.h>
#include <linux/backlight.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of_gpio.h>
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct regulator *vdds_dsi;
- int r;
if (dsi->vdds_dsi_reg != NULL)
return 0;
return PTR_ERR(vdds_dsi);
}
- if (regulator_can_change_voltage(vdds_dsi)) {
- r = regulator_set_voltage(vdds_dsi, 1800000, 1800000);
- if (r) {
- devm_regulator_put(vdds_dsi);
- DSSERR("can't set the DSI regulator voltage\n");
- return r;
- }
- }
-
dsi->vdds_dsi_reg = vdds_dsi;
return 0;
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/gfp.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
+#include <linux/of.h>
#include <video/omapdss.h>
#include <sound/omap-hdmi-audio.h>
static int hdmi_init_regulator(void)
{
- int r;
struct regulator *reg;
if (hdmi.vdda_reg != NULL)
return PTR_ERR(reg);
}
- if (regulator_can_change_voltage(reg)) {
- r = regulator_set_voltage(reg, 1800000, 1800000);
- if (r) {
- devm_regulator_put(reg);
- DSSWARN("can't set the regulator voltage\n");
- return r;
- }
- }
-
hdmi.vdda_reg = reg;
return 0;
static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
{
DSSDBG("Enter hdmi_core_powerdown_disable\n");
- REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0);
+ REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0);
}
static void hdmi_core_swreset_release(struct hdmi_core_data *core)
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
+#include <linux/of.h>
#include <video/omapdss.h>
#include <sound/omap-hdmi-audio.h>
static int hdmi_init_regulator(void)
{
- int r;
struct regulator *reg;
if (hdmi.vdda_reg != NULL)
return PTR_ERR(reg);
}
- if (regulator_can_change_voltage(reg)) {
- r = regulator_set_voltage(reg, 1800000, 1800000);
- if (r) {
- devm_regulator_put(reg);
- DSSWARN("can't set the regulator voltage\n");
- return r;
- }
- }
-
hdmi.vdda_reg = reg;
return 0;
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
- const unsigned ss_scl_high = 4000; /* ns */
- const unsigned ss_scl_low = 4700; /* ns */
+ const unsigned ss_scl_high = 4600; /* ns */
+ const unsigned ss_scl_low = 5400; /* ns */
const unsigned fs_scl_high = 600; /* ns */
const unsigned fs_scl_low = 1300; /* ns */
const unsigned sda_hold = 1000; /* ns */
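A quick sanity check on the relaxed standard-speed timings above (my arithmetic, not from the source): the SCL high and low phases together set the bit period, so 4600 ns + 5400 ns gives a 10 us period — still exactly the 100 kHz standard-mode rate, but now with margin over the I2C spec minima of 4000 ns high and 4700 ns low.

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative only: derive the SCL rate from the phase times. */
		const unsigned ss_scl_high = 4600;		/* ns */
		const unsigned ss_scl_low = 5400;		/* ns */
		unsigned period_ns = ss_scl_high + ss_scl_low;	/* 10000 ns */

		printf("SCL ~ %u kHz\n", 1000000 / period_ns);	/* 100 kHz */
		return 0;
	}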
c = (ptr[1] >> 6) & 0x3;
m = (ptr[1] >> 4) & 0x3;
- r = (ptr[1] >> 0) & 0x3;
+ r = (ptr[1] >> 0) & 0xf;
itc = (ptr[2] >> 7) & 0x1;
ec = (ptr[2] >> 4) & 0x7;
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/seq_file.h>
#include <video/omapdss.h>
#include "dss.h"
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/seq_file.h>
#include <video/omapdss.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <video/omapdss.h>
#include "dss.h"
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/seq_file.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <linux/module.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/sched.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/seq_file.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
/* use frac fb div on RS780/RS880 */
- if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ && !radeon_crtc->ss_enabled)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
if (radeon_crtc->ss.refdiv) {
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
- if (ASIC_IS_AVIVO(rdev))
+ if (rdev->family >= CHIP_RV770)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
}
}
/*
* GPU helpers function.
*/
+
+/**
+ * radeon_device_is_virtual - check if we are running in a virtual environment
+ *
+ * Check if the asic has been passed through to a VM (all asics).
+ * Used at driver startup.
+ * Returns true if virtual or false if not.
+ */
+static bool radeon_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+ return false;
+#endif
+}
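
For illustration only, a minimal userspace analogue of the check (an assumption-laden sketch, not part of the driver): X86_FEATURE_HYPERVISOR mirrors CPUID leaf 1, ECX bit 31, which hypervisors set for guests, including passed-through ones.

	#include <cpuid.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Sketch: report whether the CPU advertises a hypervisor. */
	static bool running_under_hypervisor(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
			return false;
		return ecx & (1u << 31);	/* hypervisor-present bit */
	}

	int main(void)
	{
		printf("virtual: %s\n",
		       running_under_hypervisor() ? "yes" : "no");
		return 0;
	}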
+
/**
* radeon_card_posted - check if the hw has already been initialized
*
{
uint32_t reg;
+ /* for pass through, always force asic_init */
+ if (radeon_device_is_virtual())
+ return false;
+
/* required for EFI mode on macbook2,1 which uses an r5xx asic */
if (efi_enabled(EFI_BOOT) &&
(rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
radeon_agp_suspend(rdev);
pci_save_state(dev->pdev);
- if (freeze && rdev->family >= CHIP_R600) {
+ if (freeze && rdev->family >= CHIP_CEDAR) {
rdev->asic->asic_reset(rdev, true);
pci_restore_state(dev->pdev);
} else if (suspend) {
mixer->status = STI_MIXER_DISABLING;
}
-static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* accept the provided drm_display_mode, do not fix it up */
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- return true;
-}
-
static int
sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
.enable = sti_crtc_enable,
.disable = sti_crtc_disabling,
- .mode_fixup = sti_crtc_mode_fixup,
.mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = sti_crtc_mode_set_nofb,
.mode_set_base = drm_helper_crtc_mode_set_base,
config DRM_SUN4I
tristate "DRM Support for Allwinner A10 Display Engine"
- depends on DRM && ARM
+ depends on DRM && ARM && COMMON_CLK
depends on ARCH_SUNXI || COMPILE_TEST
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
/* Get the physical address of the buffer in memory */
gem = drm_fb_cma_get_gem_obj(fb, 0);
- DRM_DEBUG_DRIVER("Using GEM @ 0x%x\n", gem->paddr);
+ DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
/* Compute the start of the displayed memory */
bpp = drm_format_plane_cpp(fb->pixel_format, 0);
paddr += (state->src_x >> 16) * bpp;
paddr += (state->src_y >> 16) * fb->pitches[0];
- DRM_DEBUG_DRIVER("Setting buffer address to 0x%x\n", paddr);
+ DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
/* Write the 32 lower bits of the address (in bits) */
lo_paddr = paddr << 3;
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
sun4i_tcon_disable(drv->tcon);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
}
static void sun4i_crtc_enable(struct drm_crtc *crtc)
static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
- return *parent_rate / DIV_ROUND_CLOSEST(*parent_rate, rate);
+ unsigned long best_parent = 0;
+ u8 best_div = 1;
+ int i;
+
+ for (i = 6; i < 127; i++) {
+ unsigned long ideal = rate * i;
+ unsigned long rounded;
+
+ rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
+ ideal);
+
+ if (rounded == ideal) {
+ best_parent = rounded;
+ best_div = i;
+ goto out;
+ }
+
+ if ((rounded < ideal) && (rounded > best_parent)) {
+ best_parent = rounded;
+ best_div = i;
+ }
+ }
+
+out:
+ *parent_rate = best_parent;
+
+ return best_parent / best_div;
}
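
To see how the search behaves, here is a standalone sketch; round_parent() is hypothetical, standing in for clk_hw_round_rate() with a parent PLL that can only lock in 3 MHz steps. For a 33 MHz dot clock the very first divider (6) already yields an exact 198 MHz parent.

	#include <stdio.h>

	/* Hypothetical parent clock: rounds down to a 3 MHz multiple. */
	static unsigned long round_parent(unsigned long rate)
	{
		return (rate / 3000000) * 3000000;
	}

	int main(void)
	{
		unsigned long rate = 33000000;	/* requested dot clock */
		unsigned long best_parent = 0;
		int best_div = 1, i;

		for (i = 6; i < 127; i++) {
			unsigned long ideal = rate * i;
			unsigned long rounded = round_parent(ideal);

			if (rounded == ideal) {	/* exact match: stop early */
				best_parent = rounded;
				best_div = i;
				break;
			}
			if (rounded < ideal && rounded > best_parent) {
				best_parent = rounded;	/* best so far */
				best_div = i;
			}
		}
		printf("parent %lu Hz / %d = %lu Hz\n",
		       best_parent, best_div, best_parent / best_div);
		return 0;
	}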
static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
- int div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ u8 div = parent_rate / rate;
return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
GENMASK(6, 0), div);
const char *clk_name, *parent_name;
struct clk_init_data init;
struct sun4i_dclk *dclk;
+ int ret;
parent_name = __clk_get_name(tcon->sclk0);
- of_property_read_string_index(dev->of_node, "clock-output-names", 0,
- &clk_name);
+ ret = of_property_read_string_index(dev->of_node,
+ "clock-output-names", 0,
+ &clk_name);
+ if (ret)
+ return ret;
dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL);
if (!dclk)
init.ops = &sun4i_dclk_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
+ init.flags = CLK_SET_RATE_PARENT;
dclk->regmap = tcon->regs;
dclk->hw.init = &init;
#include "sun4i_layer.h"
#include "sun4i_tcon.h"
-static int sun4i_drv_connector_plug_all(struct drm_device *drm)
-{
- struct drm_connector *connector, *failed;
- int ret;
-
- mutex_lock(&drm->mode_config.mutex);
- list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
- ret = drm_connector_register(connector);
- if (ret) {
- failed = connector;
- goto err;
- }
- }
- mutex_unlock(&drm->mode_config.mutex);
- return 0;
-
-err:
- list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
- if (failed == connector)
- break;
-
- drm_connector_unregister(connector);
- }
- mutex_unlock(&drm->mode_config.mutex);
-
- return ret;
-}
-
static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe)
{
struct sun4i_drv *drv = drm->dev_private;
/* Frame Buffer Operations */
/* VBlank Operations */
- .get_vblank_counter = drm_vblank_count,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = sun4i_drv_enable_vblank,
.disable_vblank = sun4i_drv_disable_vblank,
};
+static void sun4i_remove_framebuffers(void)
+{
+ struct apertures_struct *ap;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
+ /* The framebuffer can be located anywhere in RAM */
+ ap->ranges[0].base = 0;
+ ap->ranges[0].size = ~0;
+
+ remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
+ kfree(ap);
+}
+
static int sun4i_drv_bind(struct device *dev)
{
struct drm_device *drm;
}
drm->irq_enabled = true;
+ /* Remove early framebuffers (e.g. simplefb) */
+ sun4i_remove_framebuffers();
+
/* Create our framebuffer */
drv->fbdev = sun4i_framebuffer_init(drm);
if (IS_ERR(drv->fbdev)) {
if (ret)
goto free_drm;
- ret = sun4i_drv_connector_plug_all(drm);
+ ret = drm_connector_register_all(drm);
if (ret)
goto unregister_drm;
{
struct drm_device *drm = dev_get_drvdata(dev);
+ drm_connector_unregister_all(drm);
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
sun4i_framebuffer_free(drm);
count += sun4i_drv_add_endpoints(&pdev->dev, &match,
pipeline);
+ of_node_put(pipeline);
DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
count, i);
static int sun4i_rgb_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
+ struct sun4i_drv *drv = rgb->drv;
+ struct sun4i_tcon *tcon = drv->tcon;
u32 hsync = mode->hsync_end - mode->hsync_start;
u32 vsync = mode->vsync_end - mode->vsync_start;
+ unsigned long rate = mode->clock * 1000;
+ long rounded_rate;
DRM_DEBUG_DRIVER("Validating modes...\n");
DRM_DEBUG_DRIVER("Vertical parameters OK\n");
+ rounded_rate = clk_round_rate(tcon->dclk, rate);
+ if (rounded_rate < rate)
+ return MODE_CLOCK_LOW;
+
+ if (rounded_rate > rate)
+ return MODE_CLOCK_HIGH;
+
+ DRM_DEBUG_DRIVER("Clock rate OK\n");
+
return MODE_OK;
}
int ret;
/* If we don't have a panel, there's no point in going on */
- if (!tcon->panel)
+ if (IS_ERR(tcon->panel))
return -ENODEV;
rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
remote = of_graph_get_remote_port_parent(end_node);
if (!remote) {
- DRM_DEBUG_DRIVER("Enable to parse remote node\n");
+ DRM_DEBUG_DRIVER("Unable to parse remote node\n");
return ERR_PTR(-EINVAL);
}
- return of_drm_find_panel(remote);
+ return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
}
static int sun4i_tcon_bind(struct device *dev, struct device *master,
return 0;
}
- return sun4i_rgb_init(drm);
+ ret = sun4i_rgb_init(drm);
+ if (ret < 0)
+ goto err_free_clocks;
+
+ return 0;
err_free_clocks:
sun4i_tcon_free_clocks(tcon);
* Defer the probe.
*/
panel = sun4i_tcon_find_panel(node);
- if (IS_ERR(panel)) {
- /*
- * If we don't have a panel endpoint, just go on
- */
- if (PTR_ERR(panel) != -ENODEV)
- return -EPROBE_DEFER;
+
+ /*
+ * If we don't have a panel endpoint, just go on
+ */
+ if (PTR_ERR(panel) == -EPROBE_DEFER) {
+ DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n");
+ return -EPROBE_DEFER;
}
return component_add(&pdev->dev, &sun4i_tcon_ops);
return ret;
}
-static bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- uint32_t *new_flags)
+bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
{
int i;
return false;
}
+EXPORT_SYMBOL(ttm_bo_mem_compat);
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- vc4_state->mm.start);
-
- if (debug_dump_regs) {
- DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
- vc4_hvs_dump_state(dev);
- }
-
if (crtc->state->event) {
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
vc4_crtc->event = crtc->state->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
crtc->state->event = NULL;
+
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ } else {
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+ }
+
+ if (debug_dump_regs) {
+ DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
+ vc4_hvs_dump_state(dev);
}
}
{
struct drm_crtc *crtc = &vc4_crtc->base;
struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ u32 chan = vc4_crtc->channel;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- if (vc4_crtc->event) {
+ if (vc4_crtc->event &&
+ (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+ drm_crtc_vblank_put(crtc);
drm_framebuffer_unreference(flip_state->fb);
kfree(flip_state);
return ret;
}
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
/* Immediately update the plane's legacy fb pointer, so that later
* modeset prep sees the state that will be present when the semaphore
* is released.
};
static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
- DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
DRM_ROOT_ONLY),
};
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
- .get_vblank_counter = drm_vblank_count,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = vc4_debugfs_init,
return -ENOMEM;
/* Make sure that any outstanding modesets have finished. */
- ret = down_interruptible(&vc4->async_modeset);
- if (ret) {
- kfree(c);
- return ret;
+ if (nonblock) {
+ ret = down_trylock(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return -EBUSY;
+ }
+ } else {
+ ret = down_interruptible(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return ret;
+ }
}
ret = drm_atomic_helper_prepare_planes(dev, state);
#define SCALER_DISPLACT0 0x00000030
#define SCALER_DISPLACT1 0x00000034
#define SCALER_DISPLACT2 0x00000038
+#define SCALER_DISPLACTX(x) (SCALER_DISPLACT0 + \
+ (x) * (SCALER_DISPLACT1 - \
+ SCALER_DISPLACT0))
+
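The new macro assumes the three registers are evenly strided, which the 0x30/0x34/0x38 addresses confirm; a quick illustration:

	#include <stdio.h>

	#define SCALER_DISPLACT0	0x00000030
	#define SCALER_DISPLACT1	0x00000034
	#define SCALER_DISPLACTX(x)	(SCALER_DISPLACT0 + \
					 (x) * (SCALER_DISPLACT1 - \
						SCALER_DISPLACT0))

	int main(void)
	{
		/* Channel index times register stride recovers each address. */
		printf("0x%08x\n", SCALER_DISPLACTX(2));	/* 0x00000038 */
		return 0;
	}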
#define SCALER_DISPCTRL0 0x00000040
# define SCALER_DISPCTRLX_ENABLE BIT(31)
# define SCALER_DISPCTRLX_RESET BIT(30)
{
struct ttm_buffer_object *bo = &buf->base;
int ret;
+ uint32_t new_flags;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
if (unlikely(ret != 0))
goto err;
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
+
if (!ret)
vmw_bo_pin_reserved(buf, true);
{
struct ttm_buffer_object *bo = &buf->base;
int ret;
+ uint32_t new_flags;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
if (unlikely(ret != 0))
goto err;
+ if (buf->pin_count > 0) {
+ ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ goto out_unreserve;
+ }
+
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
false);
if (likely(ret == 0) || ret == -ERESTARTSYS)
struct ttm_placement placement;
struct ttm_place place;
int ret = 0;
+ uint32_t new_flags;
place = vmw_vram_placement.placement[0];
place.lpfn = bo->num_pages;
*/
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
- bo->mem.start > 0)
+ bo->mem.start > 0 &&
+ buf->pin_count == 0)
(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
- ret = ttm_bo_validate(bo, &placement, interruptible, false);
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(&placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, &placement, interruptible, false);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
+static int vmw_assume_16bpp;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
+module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
static void vmw_print_capabilities(uint32_t capabilities)
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+ dev_priv->assume_16bpp = !!vmw_assume_16bpp;
+
dev_priv->enable_fb = enable_fbdev;
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
vmw_read(dev_priv,
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+ /*
+ * Workaround for low memory 2D VMs to compensate for the
+ * allocation taken by fbdev
+ */
+ if (!(dev_priv->capabilities & SVGA_CAP_3D))
+ mem_size *= 2;
+
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem =
vmw_read(dev_priv,
spinlock_t hw_lock;
spinlock_t cap_lock;
bool has_dx;
+ bool assume_16bpp;
/*
* VGA registers.
par->set_fb = &vfb->base;
- if (!par->bo_ptr) {
- /*
- * Pin before mapping. Since we don't know in what placement
- * to pin, call into KMS to do it for us.
- */
- ret = vfb->pin(vfb);
- if (ret) {
- DRM_ERROR("Could not pin the fbdev framebuffer.\n");
- return ret;
- }
-
- ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
- par->vmw_bo->base.num_pages, &par->map);
- if (ret) {
- vfb->unpin(vfb);
- DRM_ERROR("Could not map the fbdev framebuffer.\n");
- return ret;
- }
-
- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
- }
-
return 0;
}
if (ret)
goto out_unlock;
+ if (!par->bo_ptr) {
+ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
+
+ /*
+ * Pin before mapping. Since we don't know in what placement
+ * to pin, call into KMS to do it for us.
+ */
+ ret = vfb->pin(vfb);
+ if (ret) {
+ DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+ goto out_unlock;
+ }
+
+ ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+ par->vmw_bo->base.num_pages, &par->map);
+ if (ret) {
+ vfb->unpin(vfb);
+ DRM_ERROR("Could not map the fbdev framebuffer.\n");
+ goto out_unlock;
+ }
+
+ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+ }
+
+
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
par->set_fb->width, par->set_fb->height);
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
int i;
- u32 assumed_bpp = 2;
+ u32 assumed_bpp = 4;
- /*
- * If using screen objects, then assume 32-bpp because that's what the
- * SVGA device is assuming
- */
- if (dev_priv->active_display_unit == vmw_du_screen_object)
- assumed_bpp = 4;
+ if (dev_priv->assume_16bpp)
+ assumed_bpp = 2;
if (dev_priv->active_display_unit == vmw_du_screen_target) {
max_width = min(max_width, dev_priv->stdu_max_width);
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/frame.h>
#include <asm/hypervisor.h>
#include "drmP.h"
#include "vmwgfx_msg.h"
return -EINVAL;
}
-
+STACK_FRAME_NON_STANDARD(vmw_send_msg);
/**
break;
}
+ if (retries == RETRIES)
+ return -EINVAL;
+
*msg_len = reply_len;
*msg = reply;
return 0;
}
+STACK_FRAME_NON_STANDARD(vmw_recv_msg);
/**
WARN_ON_ONCE(!stdu->defined);
- if (!vfb->dmabuf && new_fb->width == mode->hdisplay &&
- new_fb->height == mode->vdisplay)
+ new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+
+ if (new_vfbs && new_vfbs->surface->base_size.width == mode->hdisplay &&
+ new_vfbs->surface->base_size.height == mode->vdisplay)
new_content_type = SAME_AS_DISPLAY;
else if (vfb->dmabuf)
new_content_type = SEPARATE_DMA;
content_srf.mip_levels[0] = 1;
content_srf.multisample_count = 0;
} else {
- new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
content_srf = *new_vfbs->surface;
}
return ret;
}
} else if (new_content_type == SAME_AS_DISPLAY) {
- new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
new_display_srf = vmw_surface_reference(new_vfbs->surface);
}
struct elo_priv *priv = hid_get_drvdata(hdev);
hid_hw_stop(hdev);
- flush_workqueue(wq);
+ cancel_delayed_work_sync(&priv->work);
kfree(priv);
}
#define MT_QUIRK_ALWAYS_VALID (1 << 4)
#define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
+#define MT_QUIRK_CONFIDENCE (1 << 7)
#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
#define MT_QUIRK_NO_AREA (1 << 9)
#define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
__s32 contactid; /* the device ContactID assigned to this slot */
bool touch_state; /* is the touch valid? */
bool inrange_state; /* is the finger in proximity of the sensor? */
+ bool confidence_state; /* is the touch made by a finger? */
};
struct mt_class {
return 1;
case HID_DG_CONFIDENCE:
if (cls->name == MT_CLS_WIN_8 &&
- field->application == HID_DG_TOUCHPAD) {
- cls->quirks &= ~MT_QUIRK_ALWAYS_VALID;
- cls->quirks |= MT_QUIRK_VALID_IS_CONFIDENCE;
- }
+ field->application == HID_DG_TOUCHPAD)
+ cls->quirks |= MT_QUIRK_CONFIDENCE;
mt_store_field(usage, td, hi);
return 1;
case HID_DG_TIPSWITCH:
return;
if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
+ int active;
int slotnum = mt_compute_slot(td, input);
struct mt_slot *s = &td->curdata;
struct input_mt *mt = input->mt;
return;
}
+ if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
+ s->confidence_state = 1;
+ active = (s->touch_state || s->inrange_state) &&
+ s->confidence_state;
+
input_mt_slot(input, slotnum);
- input_mt_report_slot_state(input, MT_TOOL_FINGER,
- s->touch_state || s->inrange_state);
- if (s->touch_state || s->inrange_state) {
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
+ if (active) {
/* this finger is in proximity of the sensor */
int wide = (s->w > s->h);
/* divided by two to match visual scale of touch */
td->curdata.touch_state = value;
break;
case HID_DG_CONFIDENCE:
+ if (quirks & MT_QUIRK_CONFIDENCE)
+ td->curdata.confidence_state = value;
if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
td->curvalid = value;
break;
MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
USB_DEVICE_ID_NOVATEK_PCT) },
+ /* Ntrig Panel */
+ { .driver_data = MT_CLS_NSMU,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_NTRIG, 0x1b05) },
+
/* PixArt optical touch screen */
{ .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
goto inval;
} else if (uref->usage_index >= field->report_count)
goto inval;
-
- else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
- uref->usage_index + uref_multi->num_values > field->report_count))
- goto inval;
}
+ if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
+ (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+ uref->usage_index + uref_multi->num_values > field->report_count))
+ goto inval;
+
switch (cmd) {
case HIDIOCGUSAGE:
uref->value = field->value[uref->usage_index];
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/sched.h>
+#include <linux/ctype.h>
#include <linux/i8k.h>
static DEFINE_MUTEX(i8k_mutex);
static char bios_version[4];
+static char bios_machineid[16];
static struct device *i8k_hwmon_dev;
static u32 i8k_hwmon_flags;
static uint i8k_fan_mult = I8K_FAN_MULT;
static uint i8k_pwm_mult;
static uint i8k_fan_max = I8K_FAN_HIGH;
+static bool disallow_fan_type_call;
#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
#if IS_ENABLED(CONFIG_I8K)
-static bool restricted;
+static bool restricted = true;
module_param(restricted, bool, 0);
-MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
+MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)");
static bool power_status;
module_param(power_status, bool, 0600);
-MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
+MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)");
#endif
static uint fan_mult;
/*
* Read the fan type.
*/
-static int i8k_get_fan_type(int fan)
+static int _i8k_get_fan_type(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
+ if (disallow_fan_type_call)
+ return -EINVAL;
+
regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
+static int i8k_get_fan_type(int fan)
+{
+ /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
+ static int types[2] = { INT_MIN, INT_MIN };
+
+ if (types[fan] == INT_MIN)
+ types[fan] = _i8k_get_fan_type(fan);
+
+ return types[fan];
+}
+
/*
* Read the fan nominal rpm for specific fan speed.
*/
switch (cmd) {
case I8K_BIOS_VERSION:
+ if (!isdigit(bios_version[0]) || !isdigit(bios_version[1]) ||
+ !isdigit(bios_version[2]))
+ return -EINVAL;
+
val = (bios_version[0] << 16) |
(bios_version[1] << 8) | bios_version[2];
break;
case I8K_MACHINE_ID:
- memset(buff, 0, 16);
- strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
- sizeof(buff));
+ if (restricted && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ memset(buff, 0, sizeof(buff));
+ strlcpy(buff, bios_machineid, sizeof(buff));
break;
case I8K_FN_STATUS:
seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
I8K_PROC_FMT,
bios_version,
- i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+ (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid,
cpu_temp,
left_fan, right_fan, left_speed, right_speed,
ac_power, fn_key);
static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
+ if (disallow_fan_type_call &&
+ (index == 9 || index == 12))
+ return 0;
if (index >= 0 && index <= 1 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
return 0;
if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
- /* First fan attributes, if fan type is OK */
- err = i8k_get_fan_type(0);
+ /* First fan attributes, if fan status or type is OK */
+ err = i8k_get_fan_status(0);
+ if (err < 0)
+ err = i8k_get_fan_type(0);
if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
- /* Second fan attributes, if fan type is OK */
- err = i8k_get_fan_type(1);
+ /* Second fan attributes, if fan status or type is OK */
+ err = i8k_get_fan_status(1);
+ if (err < 0)
+ err = i8k_get_fan_type(1);
if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
-static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+/*
+ * On some machines, once I8K_SMM_GET_FAN_TYPE is issued the CPU fan speed
+ * starts randomly going up and down due to a bug in the Dell SMM or BIOS.
+ * Below is a blacklist of affected Dell machines for which we disallow the
+ * I8K_SMM_GET_FAN_TYPE call.
+ * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
+ */
+static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
{
- /*
- * CPU fan speed going up and down on Dell Studio XPS 8000
- * for unknown reasons.
- */
.ident = "Dell Studio XPS 8000",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
},
},
{
- /*
- * CPU fan speed going up and down on Dell Studio XPS 8100
- * for unknown reasons.
- */
.ident = "Dell Studio XPS 8100",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
},
},
+ {
+ .ident = "Dell Inspiron 580",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
+ },
+ },
{ }
};
/*
* Get DMI information
*/
- if (!dmi_check_system(i8k_dmi_table) ||
- dmi_check_system(i8k_blacklist_dmi_table)) {
+ if (!dmi_check_system(i8k_dmi_table)) {
if (!ignore_dmi && !force)
return -ENODEV;
i8k_get_dmi_data(DMI_BIOS_VERSION));
}
+ if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
+ disallow_fan_type_call = true;
+
strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
sizeof(bios_version));
+ strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+ sizeof(bios_machineid));
/*
* Get SMM Dell signature
*/
static int read_registers(struct fam15h_power_data *data)
{
- int this_cpu, ret, cpu;
int core, this_core;
cpumask_var_t mask;
+ int ret, cpu;
ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
if (!ret)
memset(data->cu_on, 0, sizeof(int) * MAX_CUS);
get_online_cpus();
- this_cpu = smp_processor_id();
/*
* Choose the first online core of each compute unit, and then
cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask);
}
- if (cpumask_test_cpu(this_cpu, mask))
- do_read_registers_on_cu(data);
+ on_each_cpu_mask(mask, do_read_registers_on_cu, data, true);
- smp_call_function_many(mask, do_read_registers_on_cu, data, true);
put_online_cpus();
-
free_cpumask_var(mask);
return 0;
int kind;
u32 flags;
- int update_interval; /* in milliseconds */
+ unsigned int update_interval; /* in milliseconds */
u8 config_orig; /* Original configuration register value */
u8 convrate_orig; /* Original conversion rate register value */
if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
- * buffer. As such zero-out the buffer so that we don't end
- * up with stale data.
- *
- * Since the tracer is still enabled drvdata::buf
- * can't be NULL.
+ * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
+ * so we don't have to explicitly clear it. Also, since the
+ * tracer is still enabled drvdata::buf can't be NULL.
*/
- memset(drvdata->buf, 0, drvdata->size);
tmc_etr_enable_hw(drvdata);
} else {
/*
*/
vaddr = drvdata->vaddr;
paddr = drvdata->paddr;
- drvdata->buf = NULL;
+ drvdata->buf = drvdata->vaddr = NULL;
}
drvdata->reading = false;
int i;
bool found = false;
struct coresight_node *node;
- struct coresight_connection *conn;
/* An activated sink has been found. Enqueue the element */
if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
/* Not a sink - recursively explore each port found on this element */
for (i = 0; i < csdev->nr_outport; i++) {
- conn = &csdev->conns[i];
- if (_coresight_build_path(conn->child_dev, path) == 0) {
+ struct coresight_device *child_dev = csdev->conns[i].child_dev;
+
+ if (child_dev && _coresight_build_path(child_dev, path) == 0) {
found = true;
break;
}
struct list_head *coresight_build_path(struct coresight_device *csdev)
{
struct list_head *path;
+ int rc;
path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
if (!path)
INIT_LIST_HEAD(path);
- if (_coresight_build_path(csdev, path)) {
+ rc = _coresight_build_path(csdev, path);
+ if (rc) {
kfree(path);
- path = NULL;
+ return ERR_PTR(rc);
}
return path;
goto out;
path = coresight_build_path(csdev);
- if (!path) {
+ if (IS_ERR(path)) {
pr_err("building path(s) failed\n");
+ ret = PTR_ERR(path);
goto out;
}
struct platform_device *mux_pdev;
#endif
struct platform_device *tco_pdev;
+
+ /*
+ * If set to true the host controller registers are reserved for
+ * ACPI AML use. Protected by acpi_lock.
+ */
+ bool acpi_reserved;
+ struct mutex acpi_lock;
};
#define FEATURE_SMBUS_PEC (1 << 0)
int ret = 0, xact = 0;
struct i801_priv *priv = i2c_get_adapdata(adap);
+ mutex_lock(&priv->acpi_lock);
+ if (priv->acpi_reserved) {
+ mutex_unlock(&priv->acpi_lock);
+ return -EBUSY;
+ }
+
pm_runtime_get_sync(&priv->pci_dev->dev);
hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
out:
pm_runtime_mark_last_busy(&priv->pci_dev->dev);
pm_runtime_put_autosuspend(&priv->pci_dev->dev);
+ mutex_unlock(&priv->acpi_lock);
return ret;
}
priv->tco_pdev = pdev;
}
+#ifdef CONFIG_ACPI
+static acpi_status
+i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+ u64 *value, void *handler_context, void *region_context)
+{
+ struct i801_priv *priv = handler_context;
+ struct pci_dev *pdev = priv->pci_dev;
+ acpi_status status;
+
+ /*
+ * Once BIOS AML code touches the OpRegion we warn and inhibit any
+ * further access from the driver itself. This device is now owned
+ * by the system firmware.
+ */
+ mutex_lock(&priv->acpi_lock);
+
+ if (!priv->acpi_reserved) {
+ priv->acpi_reserved = true;
+
+ dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
+ dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n");
+
+ /*
+ * BIOS is accessing the host controller so prevent it from
+ * suspending automatically from now on.
+ */
+ pm_runtime_get_sync(&pdev->dev);
+ }
+
+ if ((function & ACPI_IO_MASK) == ACPI_READ)
+ status = acpi_os_read_port(address, (u32 *)value, bits);
+ else
+ status = acpi_os_write_port(address, (u32)*value, bits);
+
+ mutex_unlock(&priv->acpi_lock);
+
+ return status;
+}
+
+static int i801_acpi_probe(struct i801_priv *priv)
+{
+ struct acpi_device *adev;
+ acpi_status status;
+
+ adev = ACPI_COMPANION(&priv->pci_dev->dev);
+ if (adev) {
+ status = acpi_install_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler,
+ NULL, priv);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ }
+
+ return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]);
+}
+
+static void i801_acpi_remove(struct i801_priv *priv)
+{
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(&priv->pci_dev->dev);
+ if (!adev)
+ return;
+
+ acpi_remove_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler);
+
+ mutex_lock(&priv->acpi_lock);
+ if (priv->acpi_reserved)
+ pm_runtime_put(&priv->pci_dev->dev);
+ mutex_unlock(&priv->acpi_lock);
+}
+#else
+static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
+static inline void i801_acpi_remove(struct i801_priv *priv) { }
+#endif
+
static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
unsigned char temp;
priv->adapter.dev.parent = &dev->dev;
ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
priv->adapter.retries = 3;
+ mutex_init(&priv->acpi_lock);
priv->pci_dev = dev;
switch (dev->device) {
return -ENODEV;
}
- err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
- if (err) {
+ if (i801_acpi_probe(priv))
return -ENODEV;
- }
err = pcim_iomap_regions(dev, 1 << SMBBAR,
dev_driver_string(&dev->dev));
"Failed to request SMBus region 0x%lx-0x%Lx\n",
priv->smba,
(unsigned long long)pci_resource_end(dev, SMBBAR));
+ i801_acpi_remove(priv);
return err;
}
err = i2c_add_adapter(&priv->adapter);
if (err) {
dev_err(&dev->dev, "Failed to add SMBus adapter\n");
+ i801_acpi_remove(priv);
return err;
}
i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
+ i801_acpi_remove(priv);
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
platform_device_unregister(priv->tco_pdev);
return result;
for (i = 0; i < length; i++) {
- /* for the last byte TWSI_CTL_AAK must not be set */
- if (i + 1 == length)
+ /*
+ * For the last byte to receive, TWSI_CTL_AAK must not be set.
+ *
+ * A special case is I2C_M_RECV_LEN where we don't know the
+ * additional length yet. If recv_len is set we assume we're
+ * not reading the final byte and therefore need to set
+ * TWSI_CTL_AAK.
+ */
+ if ((i + 1 == length) && !(recv_len && i == 0))
final_read = true;
/* clear iflg to allow next event */
data[i] = octeon_i2c_data_read(i2c);
if (recv_len && i == 0) {
- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) {
- dev_err(i2c->dev,
- "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n",
- __func__, data[i]);
+ if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
return -EPROTO;
- }
length += data[i];
}
}
}
+ idx = 0;
+
do {
if (msgs[idx].len == 0) {
ret = -EINVAL;
ret = tegra_i2c_init(i2c_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize i2c controller");
- goto unprepare_div_clk;
+ goto disable_div_clk;
}
ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
* The board info passed can safely be __initdata, but be careful of embedded
* pointers (for platform_data, functions, etc) since that won't be copied.
*/
-int __init
-i2c_register_board_info(int busnum,
- struct i2c_board_info const *info, unsigned len)
+int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
{
int status;
mux->data.idle_in_use = true;
/* map address from "reg" if exists */
- if (of_address_to_resource(np, 0, &res)) {
+ if (of_address_to_resource(np, 0, &res) == 0) {
mux->data.reg_size = resource_size(&res);
mux->data.reg = devm_ioremap_resource(&pdev->dev, &res);
if (IS_ERR(mux->data.reg))
.remove = i2c_mux_reg_remove,
.driver = {
.name = "i2c-mux-reg",
+ .of_match_table = of_match_ptr(i2c_mux_reg_of_match),
},
};
mutex_lock(&st->buf_lock);
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
- if (ret)
+ if (ret < 0)
goto error_ret;
st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
break;
case IIO_CHAN_INFO_SCALE:
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
- if (ret)
+ if (ret < 0)
goto error_ret;
*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
ret = IIO_VAL_INT_PLUS_MICRO;
int st_accel_allocate_ring(struct iio_dev *indio_dev)
{
- return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ return iio_triggered_buffer_setup(indio_dev, NULL,
&st_sensors_trigger_handler, &st_accel_buffer_setup_ops);
}
static const struct iio_trigger_ops st_accel_trigger_ops = {
.owner = THIS_MODULE,
.set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE,
+ .validate_device = st_sensors_validate_device,
};
#define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops)
#else
st = iio_priv(indio_dev);
- st->reg = devm_regulator_get(&spi->dev, "vref");
- if (!IS_ERR_OR_NULL(st->reg)) {
+ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
+ if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
return ret;
st->vref_mv = ret / 1000;
} else {
+ /* Any other error indicates that the regulator does exist */
+ if (PTR_ERR(st->reg) != -ENODEV)
+ return PTR_ERR(st->reg);
/* Use internal reference */
st->vref_mv = 2500;
}
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ s64 timestamp;
- /* If we have a status register, check if this IRQ came from us */
- if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
- u8 status;
-
- len = sdata->tf->read_byte(&sdata->tb, sdata->dev,
- sdata->sensor_settings->drdy_irq.addr_stat_drdy,
- &status);
- if (len < 0)
- dev_err(sdata->dev, "could not read channel status\n");
-
- /*
- * If this was not caused by any channels on this sensor,
- * return IRQ_NONE
- */
- if (!(status & (u8)indio_dev->active_scan_mask[0]))
- return IRQ_NONE;
- }
+ /* If we do timestamping here, do it before reading the values */
+ if (sdata->hw_irq_trigger)
+ timestamp = sdata->hw_timestamp;
+ else
+ timestamp = iio_get_time_ns();
len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
if (len < 0)
goto st_sensors_get_buffer_element_error;
iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data,
- pf->timestamp);
+ timestamp);
st_sensors_get_buffer_element_error:
iio_trigger_notify_done(indio_dev->trig);
if (err < 0)
return err;
+ /* Disable DRDY; this might still be enabled after reboot. */
+ err = st_sensors_set_dataready_irq(indio_dev, false);
+ if (err < 0)
+ return err;
+
if (sdata->current_fullscale) {
err = st_sensors_set_fullscale(indio_dev,
sdata->current_fullscale->num);
else
drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
+ /* Flag to the poll function that the hardware trigger is in use */
+ sdata->hw_irq_trigger = enable;
+
/* Enable/Disable the interrupt generator for data ready. */
err = st_sensors_write_data_with_mask(indio_dev,
sdata->sensor_settings->drdy_irq.addr,
#include <linux/iio/common/st_sensors.h>
#include "st_sensors_core.h"
+/**
+ * st_sensors_irq_handler() - top half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_handler(int irq, void *p)
+{
+ struct iio_trigger *trig = p;
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ /* Get the time stamp as close in time as possible */
+ sdata->hw_timestamp = iio_get_time_ns();
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * st_sensors_irq_thread() - bottom half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_thread(int irq, void *p)
+{
+ struct iio_trigger *trig = p;
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+ int ret;
+
+ /*
+ * If this trigger is backed by a hardware interrupt and we have a
+ * status register, check if this IRQ came from us
+ */
+ if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
+ u8 status;
+
+ ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ sdata->sensor_settings->drdy_irq.addr_stat_drdy,
+ &status);
+ if (ret < 0) {
+ dev_err(sdata->dev, "could not read channel status\n");
+ goto out_poll;
+ }
+ /*
+ * the lower bits of .active_scan_mask[0] are directly mapped
+ * to the channels on the sensor: either bit 0 for
+ * one-dimensional sensors, or e.g. x,y,z for accelerometers,
+ * gyroscopes or magnetometers. No sensor uses more than 3
+ * channels, so cut the other status bits here.
+ */
+ status &= 0x07;
+
+ /*
+ * If this was not caused by any channels on this sensor,
+ * return IRQ_NONE
+ */
+ if (!indio_dev->active_scan_mask)
+ return IRQ_NONE;
+ if (!(status & (u8)indio_dev->active_scan_mask[0]))
+ return IRQ_NONE;
+ }
+
+out_poll:
+ /* It's our IRQ: proceed to handle the register polling */
+ iio_trigger_poll_chained(p);
+ return IRQ_HANDLED;
+}
+
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops)
{
return -ENOMEM;
}
+ iio_trigger_set_drvdata(sdata->trig, indio_dev);
+ sdata->trig->ops = trigger_ops;
+ sdata->trig->dev.parent = sdata->dev;
+
irq = sdata->get_irq_data_ready(indio_dev);
irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
/*
sdata->sensor_settings->drdy_irq.addr_stat_drdy)
irq_trig |= IRQF_SHARED;
- err = request_threaded_irq(irq,
- iio_trigger_generic_data_rdy_poll,
- NULL,
+ /* Let's create an interrupt thread masking the hard IRQ here */
+ irq_trig |= IRQF_ONESHOT;
+
+ err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+ st_sensors_irq_handler,
+ st_sensors_irq_thread,
irq_trig,
sdata->trig->name,
sdata->trig);
goto iio_trigger_free;
}
- iio_trigger_set_drvdata(sdata->trig, indio_dev);
- sdata->trig->ops = trigger_ops;
- sdata->trig->dev.parent = sdata->dev;
-
err = iio_trigger_register(sdata->trig);
if (err < 0) {
dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
}
EXPORT_SYMBOL(st_sensors_deallocate_trigger);
+int st_sensors_validate_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev)
+{
+ struct iio_dev *indio = iio_trigger_get_drvdata(trig);
+
+ if (indio != indio_dev)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(st_sensors_validate_device);
+
MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger");
MODULE_LICENSE("GPL v2");
config STX104
tristate "Apex Embedded Systems STX104 DAC driver"
- depends on X86 && ISA
+ depends on X86 && ISA_BUS_API
help
Say yes here to build support for the 2-channel DAC on the Apex
Embedded Systems STX104 integrated analog PC/104 card. The base port
device_for_each_child_node(st->dev, child) {
ret = fwnode_property_read_u32(child, "reg", ®);
- if (ret || reg > ARRAY_SIZE(st->channel_modes))
+ if (ret || reg >= ARRAY_SIZE(st->channel_modes))
continue;
ret = fwnode_property_read_u32(child, "adi,mode", &tmp);
int st_gyro_allocate_ring(struct iio_dev *indio_dev)
{
- return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ return iio_triggered_buffer_setup(indio_dev, NULL,
&st_sensors_trigger_handler, &st_gyro_buffer_setup_ops);
}
static const struct iio_trigger_ops st_gyro_trigger_ops = {
.owner = THIS_MODULE,
.set_trigger_state = ST_GYRO_TRIGGER_SET_STATE,
+ .validate_device = st_sensors_validate_device,
};
#define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops)
#else
struct am2315_sensor_data sensor_data;
ret = am2315_read_data(data, &sensor_data);
- if (ret < 0) {
- mutex_unlock(&data->lock);
+ if (ret < 0)
goto err;
- }
mutex_lock(&data->lock);
if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
},
{ /* IIO_HUMIDITYRELATIVE channel */
.shift = 8,
- .mask = 2,
+ .mask = 3,
},
};
dev_err(&client->dev, "cannot read high byte measurement");
return ret;
}
- val = ret << 6;
+ val = ret << 8;
ret = i2c_smbus_read_byte(client);
if (ret < 0) {
dev_err(&client->dev, "cannot read low byte measurement");
return ret;
}
- val |= ret >> 2;
+ val |= ret;
return val;
}
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:
if (chan->type == IIO_TEMP) {
- *val = 165;
- *val2 = 65536 >> 2;
+ *val = 165000;
+ *val2 = 65536;
return IIO_VAL_FRACTIONAL;
} else {
- *val = 0;
- *val2 = 10000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = 100;
+ *val2 = 65536;
+ return IIO_VAL_FRACTIONAL;
}
break;
case IIO_CHAN_INFO_OFFSET:
- *val = -3971;
- *val2 = 879096;
+ *val = -15887;
+ *val2 = 515151;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
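A worked check of the corrected constants (my arithmetic, assuming the usual IIO convention of value = (raw + offset) * scale): the HDC100x temperature is T(C) = raw / 65536 * 165 - 40, so in millidegrees the scale is 165000/65536 and the offset is -40000 * 65536 / 165000 ~= -15887.515151 raw counts — exactly the -15887/515151 pair above. Likewise the humidity scale 100/65536 maps the full 16-bit range onto 0..100 %RH.

	#include <stdio.h>

	/* Hedged sketch of the hdc100x temperature conversion:
	 * T(mC) = (raw + offset) * scale, with scale = 165000/65536 and
	 * offset = -40000 * 65536 / 165000 = -15887.515151... counts. */
	static double temp_mdegc(unsigned raw)
	{
		return (raw - 15887.515151) * 165000.0 / 65536.0;
	}

	int main(void)
	{
		printf("%.0f mC\n", temp_mdegc(0));	/* -40000, i.e. -40 C */
		printf("%.0f mC\n", temp_mdegc(0xffff));	/* ~124997, ~125 C */
		return 0;
	}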
};
static const struct bmi160_odr bmi160_accel_odr[] = {
- {0x01, 0, 78125},
- {0x02, 1, 5625},
- {0x03, 3, 125},
- {0x04, 6, 25},
- {0x05, 12, 5},
+ {0x01, 0, 781250},
+ {0x02, 1, 562500},
+ {0x03, 3, 125000},
+ {0x04, 6, 250000},
+ {0x05, 12, 500000},
{0x06, 25, 0},
{0x07, 50, 0},
{0x08, 100, 0},
{0x08, 100, 0},
{0x09, 200, 0},
{0x0A, 400, 0},
- {0x0B, 8000, 0},
+ {0x0B, 800, 0},
{0x0C, 1600, 0},
{0x0D, 3200, 0},
};
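Assuming these entries follow the usual IIO integer-plus-microhertz encoding (an assumption on my part; the table itself does not say), the corrected values decode as below — which is also why 8000 had to become 800 for the 0x0B setting sitting between 400 Hz and 1600 Hz.

	#include <stdio.h>

	/* Hedged sketch: decode a {bits, val, val2} ODR entry as Hz. */
	static double odr_hz(int val, int val2)
	{
		return val + val2 / 1e6;
	}

	int main(void)
	{
		printf("%g Hz\n", odr_hz(0, 781250));	/* 0.78125 Hz */
		printf("%g Hz\n", odr_hz(12, 500000));	/* 12.5 Hz */
		printf("%g Hz\n", odr_hz(800, 0));	/* 800 Hz */
		return 0;
	}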
return regmap_update_bits(data->regmap,
bmi160_regs[t].config,
- bmi160_odr_table[t].tbl[i].bits,
- bmi160_regs[t].config_odr_mask);
+ bmi160_regs[t].config_odr_mask,
+ bmi160_odr_table[t].tbl[i].bits);
}
static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
int i;
acpi_status status;
union acpi_object *cpm;
+ int ret;
status = acpi_evaluate_object(adev->handle, "CNF0", NULL, &buffer);
if (ACPI_FAILURE(status))
}
}
}
-
+ ret = cpm->package.count;
kfree(buffer.pointer);
- return cpm->package.count;
+ return ret;
}
static int acpi_i2c_check_resource(struct acpi_resource *ares, void *data)
/* Prevent the module from being removed whilst attached to a trigger */
__module_get(pf->indio_dev->info->driver_module);
+
+ /* Get irq number */
pf->irq = iio_trigger_get_irq(trig);
+ if (pf->irq < 0)
+ goto out_put_module;
+
+ /* Request irq */
ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
pf->type, pf->name,
pf);
- if (ret < 0) {
- module_put(pf->indio_dev->info->driver_module);
- return ret;
- }
+ if (ret < 0)
+ goto out_put_irq;
+ /* Enable trigger in driver */
if (trig->ops && trig->ops->set_trigger_state && notinuse) {
ret = trig->ops->set_trigger_state(trig, true);
if (ret < 0)
- module_put(pf->indio_dev->info->driver_module);
+ goto out_free_irq;
}
return ret;
+
+out_free_irq:
+ free_irq(pf->irq, pf);
+out_put_irq:
+ iio_trigger_put_irq(trig, pf->irq);
+out_put_module:
+ module_put(pf->indio_dev->info->driver_module);
+ return ret;
}
static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
iio_device_attach_buffer(indio_dev, buffer);
+ indio_dev->dev.parent = &client->dev;
indio_dev->info = &apds9960_info;
indio_dev->name = APDS9960_DRV_NAME;
indio_dev->channels = apds9960_channels;
int ret;
if (!readval)
- bh1780_write(bh1780, (u8)reg, (u8)writeval);
+ return bh1780_write(bh1780, (u8)reg, (u8)writeval);
ret = bh1780_read(bh1780, (u8)reg);
if (ret < 0)
indio_dev->dev.parent = &client->dev;
indio_dev->info = &bh1780_info;
- indio_dev->name = id->name;
+ indio_dev->name = "bh1780";
indio_dev->channels = bh1780_channels;
indio_dev->num_channels = ARRAY_SIZE(bh1780_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
static int bh1780_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct bh1780_data *bh1780 = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct bh1780_data *bh1780 = iio_priv(indio_dev);
int ret;
ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
static int bh1780_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct bh1780_data *bh1780 = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct bh1780_data *bh1780 = iio_priv(indio_dev);
int ret;
ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON);
{
.type = IIO_PROXIMITY,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.scan_index = MAX44000_SCAN_INDEX_PRX,
.scan_type = {
.sign = 'u',
int st_magn_allocate_ring(struct iio_dev *indio_dev)
{
- return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ return iio_triggered_buffer_setup(indio_dev, NULL,
&st_sensors_trigger_handler, &st_magn_buffer_setup_ops);
}
static const struct iio_trigger_ops st_magn_trigger_ops = {
.owner = THIS_MODULE,
.set_trigger_state = ST_MAGN_TRIGGER_SET_STATE,
+ .validate_device = st_sensors_validate_device,
};
#define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops)
#else
if (ret < 0)
return ret;
if (chip_id != id->driver_data) {
- dev_err(&client->dev, "bad chip id. expected %x got %x\n",
- BMP280_CHIP_ID, chip_id);
+ dev_err(&client->dev, "bad chip id. expected %lx got %x\n",
+ id->driver_data, chip_id);
return -EINVAL;
}
int st_press_allocate_ring(struct iio_dev *indio_dev)
{
- return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ return iio_triggered_buffer_setup(indio_dev, NULL,
&st_sensors_trigger_handler, &st_press_buffer_setup_ops);
}
#include <linux/iio/common/st_sensors.h>
#include "st_pressure.h"
+#define MCELSIUS_PER_CELSIUS 1000
+
+/* Default pressure sensitivity */
#define ST_PRESS_LSB_PER_MBAR 4096UL
#define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \
ST_PRESS_LSB_PER_MBAR)
+
+/* Default temperature sensitivity */
#define ST_PRESS_LSB_PER_CELSIUS 480UL
-#define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \
- ST_PRESS_LSB_PER_CELSIUS)
+#define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL
+
#define ST_PRESS_NUMBER_DATA_CHANNELS 1
/* FULLSCALE */
+#define ST_PRESS_FS_AVL_1100MB 1100
#define ST_PRESS_FS_AVL_1260MB 1260
#define ST_PRESS_1_OUT_XL_ADDR 0x28
#define ST_PRESS_LPS331AP_PW_MASK 0x80
#define ST_PRESS_LPS331AP_FS_ADDR 0x23
#define ST_PRESS_LPS331AP_FS_MASK 0x30
-#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00
-#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
-#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
#define ST_PRESS_LPS331AP_BDU_ADDR 0x20
#define ST_PRESS_LPS331AP_BDU_MASK 0x04
#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
#define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22
#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40
#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
-#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
/* CUSTOM VALUES FOR LPS001WP SENSOR */
+
+/* LPS001WP pressure resolution */
+#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
+/* LPS001WP temperature resolution */
+#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL
+
#define ST_PRESS_LPS001WP_WAI_EXP 0xba
#define ST_PRESS_LPS001WP_ODR_ADDR 0x20
#define ST_PRESS_LPS001WP_ODR_MASK 0x30
#define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
#define ST_PRESS_LPS001WP_PW_ADDR 0x20
#define ST_PRESS_LPS001WP_PW_MASK 0x40
+#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
+ (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
#define ST_PRESS_LPS001WP_BDU_ADDR 0x20
#define ST_PRESS_LPS001WP_BDU_MASK 0x04
#define ST_PRESS_LPS001WP_MULTIREAD_BIT true
#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
#define ST_PRESS_LPS25H_PW_ADDR 0x20
#define ST_PRESS_LPS25H_PW_MASK 0x80
-#define ST_PRESS_LPS25H_FS_ADDR 0x00
-#define ST_PRESS_LPS25H_FS_MASK 0x00
-#define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00
-#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
-#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
#define ST_PRESS_LPS25H_BDU_ADDR 0x20
#define ST_PRESS_LPS25H_BDU_MASK 0x04
#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
#define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22
#define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40
#define ST_PRESS_LPS25H_MULTIREAD_BIT true
-#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
.storagebits = 16,
.endianness = IIO_LE,
},
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
.modified = 0,
},
{
},
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_SCALE),
.modified = 0,
},
IIO_CHAN_SOFT_TIMESTAMP(1)
.addr = ST_PRESS_LPS331AP_FS_ADDR,
.mask = ST_PRESS_LPS331AP_FS_MASK,
.fs_avl = {
+ /*
+ * Pressure and temperature sensitivity values
+ * as defined in table 3 of the LPS331AP datasheet.
+ */
[0] = {
.num = ST_PRESS_FS_AVL_1260MB,
- .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL,
- .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN,
- .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
+ .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+ .gain2 = ST_PRESS_LSB_PER_CELSIUS,
},
},
},
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.fs = {
- .addr = 0,
+ .fs_avl = {
+ /*
+ * Pressure and temperature resolution values
+ * as defined in table 3 of the LPS001WP datasheet.
+ */
+ [0] = {
+ .num = ST_PRESS_FS_AVL_1100MB,
+ .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN,
+ .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS,
+ },
+ },
},
.bdu = {
.addr = ST_PRESS_LPS001WP_BDU_ADDR,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.fs = {
- .addr = ST_PRESS_LPS25H_FS_ADDR,
- .mask = ST_PRESS_LPS25H_FS_MASK,
.fs_avl = {
+ /*
+ * Pressure and temperature sensitivity values
+ * as defined in table 3 of the LPS25H datasheet.
+ */
[0] = {
.num = ST_PRESS_FS_AVL_1260MB,
- .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
- .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
- .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
+ .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+ .gain2 = ST_PRESS_LSB_PER_CELSIUS,
},
},
},
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = 0;
-
switch (ch->type) {
case IIO_PRESSURE:
+ *val = 0;
*val2 = press_data->current_fullscale->gain;
- break;
+ return IIO_VAL_INT_PLUS_NANO;
case IIO_TEMP:
+ *val = MCELSIUS_PER_CELSIUS;
*val2 = press_data->current_fullscale->gain2;
- break;
+ return IIO_VAL_FRACTIONAL;
default:
err = -EINVAL;
goto read_error;
}
- return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_OFFSET:
switch (ch->type) {
case IIO_TEMP:
- *val = 425;
- *val2 = 10;
+ *val = ST_PRESS_MILLI_CELSIUS_OFFSET *
+ press_data->current_fullscale->gain2;
+ *val2 = MCELSIUS_PER_CELSIUS;
break;
default:
err = -EINVAL;
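Checking the numbers above (my derivation; the LPS331AP datasheet gives T = 42.5 C + raw/480): with gain2 = 480 LSB/C the driver now reports scale = 1000/480 mC per LSB and offset = 42500 * 480 / 1000 = 20400 counts, so (raw + offset) * scale comes out to 42500 mC at raw = 0, as expected.

	#include <stdio.h>

	/* Hedged sketch: userspace reconstruction of the temperature from
	 * the scale and offset the driver now exposes (gain2 = 480). */
	int main(void)
	{
		const double scale = 1000.0 / 480.0;		/* mC per LSB */
		const double offset = 42500.0 * 480.0 / 1000.0;	/* 20400 */
		int raw = 0;

		printf("%.1f mC\n", (raw + offset) * scale);	/* 42500.0 */
		return 0;
	}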
static const struct iio_trigger_ops st_press_trigger_ops = {
.owner = THIS_MODULE,
.set_trigger_state = ST_PRESS_TRIGGER_SET_STATE,
+ .validate_device = st_sensors_validate_device,
};
#define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops)
#else
struct delayed_work work;
u32 tune_cap;
+ u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
u8 buf[2] ____cacheline_aligned;
};
.type = IIO_PROXIMITY,
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_PROCESSED),
+ BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_SCALE),
.scan_index = 0,
.scan_type = {
.sign = 'u',
/* storm out of range */
if (*val == AS3935_DATA_MASK)
return -EINVAL;
- *val *= 1000;
+
+ if (m == IIO_CHAN_INFO_PROCESSED)
+ *val *= 1000;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1000;
break;
default:
return -EINVAL;
ret = as3935_read(st, AS3935_DATA, &val);
if (ret)
goto err_read;
- val &= AS3935_DATA_MASK;
- val *= 1000;
- iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp);
+ st->buffer[0] = val & AS3935_DATA_MASK;
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
+ pf->timestamp);
err_read:
iio_trigger_notify_done(indio_dev->trig);
{
int ret = 0;
struct net_device *old_net_dev;
+ enum ib_gid_type old_gid_type;
/* in rdma_cap_roce_gid_table, this function should be protected by a
* sleep-able lock.
}
old_net_dev = table->data_vec[ix].attr.ndev;
+ old_gid_type = table->data_vec[ix].attr.gid_type;
if (old_net_dev && old_net_dev != attr->ndev)
dev_put(old_net_dev);
/* if modify_gid failed, just delete the old gid */
attr = &zattr;
table->data_vec[ix].context = NULL;
}
- if (default_gid)
- table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+
memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
+ if (default_gid) {
+ table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
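+ /* a deleted default GID keeps its configured gid_type */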
+ if (action == GID_TABLE_WRITE_ACTION_DEL)
+ table->data_vec[ix].attr.gid_type = old_gid_type;
+ }
if (table->data_vec[ix].attr.ndev &&
table->data_vec[ix].attr.ndev != old_net_dev)
dev_hold(table->data_vec[ix].attr.ndev);
for (ix = 0; ix < table->sz; ix++)
if (table->data_vec[ix].attr.ndev == ndev)
- if (!del_gid(ib_dev, port, table, ix, false))
+ if (!del_gid(ib_dev, port, table, ix,
+ !!(table->data_vec[ix].props &
+ GID_TABLE_ENTRY_DEFAULT)))
deleted = true;
write_unlock_irq(&table->rwlock);
work->cm_event.event = IB_CM_USER_ESTABLISHED;
/* Check if the device started its remove_one */
- spin_lock_irq(&cm.lock);
+ spin_lock_irqsave(&cm.lock, flags);
if (!cm_dev->going_down) {
queue_delayed_work(cm.wq, &work->work, 0);
} else {
kfree(work);
ret = -ENODEV;
}
- spin_unlock_irq(&cm.lock);
+ spin_unlock_irqrestore(&cm.lock, flags);
out:
return ret;
complete(&id_priv->comp);
}
-static int cma_disable_callback(struct rdma_id_private *id_priv,
- enum rdma_cm_state state)
-{
- mutex_lock(&id_priv->handler_mutex);
- if (id_priv->state != state) {
- mutex_unlock(&id_priv->handler_mutex);
- return -EINVAL;
- }
- return 0;
-}
-
struct rdma_cm_id *rdma_create_id(struct net *net,
rdma_cm_event_handler event_handler,
void *context, enum rdma_port_space ps,
struct rdma_cm_event event;
int ret = 0;
+ mutex_lock(&id_priv->handler_mutex);
if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
- cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
+ id_priv->state != RDMA_CM_CONNECT) ||
(ib_event->event == IB_CM_TIMEWAIT_EXIT &&
- cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
- return 0;
+ id_priv->state != RDMA_CM_DISCONNECT))
+ goto out;
memset(&event, 0, sizeof event);
switch (ib_event->event) {
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
- struct rdma_id_private *listen_id, *conn_id;
+ struct rdma_id_private *listen_id, *conn_id = NULL;
struct rdma_cm_event event;
struct net_device *net_dev;
int offset, ret;
goto net_dev_put;
}
- if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) {
+ mutex_lock(&listen_id->handler_mutex);
+ if (listen_id->state != RDMA_CM_LISTEN) {
ret = -ECONNABORTED;
- goto net_dev_put;
+ goto err1;
}
memset(&event, 0, sizeof event);
struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
- if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
- return 0;
+ mutex_lock(&id_priv->handler_mutex);
+ if (id_priv->state != RDMA_CM_CONNECT)
+ goto out;
memset(&event, 0, sizeof event);
switch (iw_event->event) {
return ret;
}
+out:
mutex_unlock(&id_priv->handler_mutex);
return ret;
}
struct rdma_cm_id *new_cm_id;
struct rdma_id_private *listen_id, *conn_id;
struct rdma_cm_event event;
- int ret;
+ int ret = -ECONNABORTED;
struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
listen_id = cm_id->context;
- if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
- return -ECONNABORTED;
+
+ mutex_lock(&listen_id->handler_mutex);
+ if (listen_id->state != RDMA_CM_LISTEN)
+ goto out;
/* Create a new RDMA id for the new IW CM ID */
new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
int ret = 0;
- if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
- return 0;
+ mutex_lock(&id_priv->handler_mutex);
+ if (id_priv->state != RDMA_CM_CONNECT)
+ goto out;
memset(&event, 0, sizeof event);
switch (ib_event->event) {
struct rdma_id_private *id_priv;
struct cma_multicast *mc = multicast->context;
struct rdma_cm_event event;
- int ret;
+ int ret = 0;
id_priv = mc->id_priv;
- if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
- cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
- return 0;
+ mutex_lock(&id_priv->handler_mutex);
+ if (id_priv->state != RDMA_CM_ADDR_BOUND &&
+ id_priv->state != RDMA_CM_ADDR_RESOLVED)
+ goto out;
if (!status)
status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
return 0;
}
+out:
mutex_unlock(&id_priv->handler_mutex);
return 0;
}
gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
if (addr->sa_family == AF_INET) {
- if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+ mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
true);
- if (!err) {
- mc->igmp_joined = true;
- mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+ if (!err)
+ mc->igmp_joined = true;
}
} else {
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
if (err || port_attr->subnet_prefix)
return err;
+ if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
+ return 0;
+
err = ib_query_gid(device, port_num, 0, &gid, NULL);
if (err)
return err;
goto err_mad;
}
- if (ib_add_ibnl_clients()) {
+ ret = ib_add_ibnl_clients();
+ if (ret) {
pr_warn("Couldn't register ibnl clients\n");
goto err_sa;
}
if (!nlmsg_request) {
pr_info("%s: Could not find a matching request (seq = %u)\n",
__func__, msg_seq);
- return -EINVAL;
+ return -EINVAL;
}
pm_msg = nlmsg_request->req_buffer;
local_sockaddr = (struct sockaddr_storage *)
/* Now, check to see if there are any methods still in use */
if (!check_method_table(method)) {
/* If not, release management method table */
- kfree(method);
- class->method_table[mgmt_class] = NULL;
- /* Any management classes left ? */
+ kfree(method);
+ class->method_table[mgmt_class] = NULL;
+ /* Any management classes left ? */
if (!check_class_table(class)) {
/* If not, release management class table */
kfree(class);
static PORT_PMA_ATTR(port_rcv_data , 13, 32, 224);
static PORT_PMA_ATTR(port_xmit_packets , 14, 32, 256);
static PORT_PMA_ATTR(port_rcv_packets , 15, 32, 288);
+static PORT_PMA_ATTR(port_xmit_wait , 0, 32, 320);
/*
* Counters added by extended set
&port_pma_attr_port_rcv_data.attr.attr,
&port_pma_attr_port_xmit_packets.attr.attr,
&port_pma_attr_port_rcv_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
NULL
};
&port_pma_attr_ext_port_xmit_data.attr.attr,
&port_pma_attr_ext_port_rcv_data.attr.attr,
&port_pma_attr_ext_port_xmit_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
&port_pma_attr_ext_port_rcv_packets.attr.attr,
&port_pma_attr_ext_unicast_rcv_packets.attr.attr,
&port_pma_attr_ext_unicast_xmit_packets.attr.attr,
&port_pma_attr_ext_port_rcv_data.attr.attr,
&port_pma_attr_ext_port_xmit_packets.attr.attr,
&port_pma_attr_ext_port_rcv_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
NULL
};
static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
u8 port_num)
{
- struct attribute_group *hsag = NULL;
+ struct attribute_group *hsag;
struct rdma_hw_stats *stats;
- int i = 0, ret;
+ int i, ret;
stats = device->alloc_hw_stats(device, port_num);
return;
if (!stats->names || stats->num_counters <= 0)
- goto err;
+ goto err_free_stats;
+ /*
+ * Two extra attribute elements here: one for the lifespan entry and
+ * one to NULL-terminate the list for the sysfs core code.
+ */
hsag = kzalloc(sizeof(*hsag) +
- // 1 extra for the lifespan config entry
- sizeof(void *) * (stats->num_counters + 1),
+ sizeof(void *) * (stats->num_counters + 2),
GFP_KERNEL);
if (!hsag)
- return;
+ goto err_free_stats;
ret = device->get_hw_stats(device, stats, port_num,
stats->num_counters);
if (ret != stats->num_counters)
- goto err;
+ goto err_free_hsag;
stats->timestamp = jiffies;
hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
if (!hsag->attrs[i])
goto err;
+ sysfs_attr_init(hsag->attrs[i]);
}
/* treat an error here as non-fatal */
hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
+ if (hsag->attrs[i])
+ sysfs_attr_init(hsag->attrs[i]);
if (port) {
struct kobject *kobj = &port->kobj;
return;
err:
- kfree(stats);
for (; i >= 0; i--)
kfree(hsag->attrs[i]);
+err_free_hsag:
kfree(hsag);
+err_free_stats:
+ kfree(stats);
return;
}
struct ib_srq *srq = NULL;
struct ib_qp *qp;
char *buf;
- struct ib_qp_init_attr attr;
+ struct ib_qp_init_attr attr = {};
struct ib_uverbs_ex_create_qp_resp resp;
int ret;
ah_attr->grh.dgid = sgid;
if (!rdma_cap_eth_ah(device, port_num)) {
- ret = ib_find_cached_gid_by_port(device, &dgid,
- IB_GID_TYPE_IB,
- port_num, NULL,
- &gid_index);
- if (ret)
- return ret;
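+ /* the SA well-known GID is not in the GID cache; use index 0 for it */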
+ if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
+ ret = ib_find_cached_gid_by_port(device, &dgid,
+ IB_GID_TYPE_IB,
+ port_num, NULL,
+ &gid_index);
+ if (ret)
+ return ret;
+ } else {
+ gid_index = 0;
+ }
}
ah_attr->grh.sgid_index = (u8) gid_index;
const struct cpumask *node_mask,
*proc_mask = tsk_cpus_allowed(current);
struct cpu_mask_set *set = &dd->affinity->proc;
- char buf[1024];
/*
* check whether process/context affinity has already
* been set
*/
if (cpumask_weight(proc_mask) == 1) {
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
- hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s",
- current->pid, current->comm, buf);
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
+ current->pid, current->comm,
+ cpumask_pr_args(proc_mask));
/*
* Mark the pre-set CPU as used. This is atomic so we don't
* need the lock
cpumask_set_cpu(cpu, &set->used);
goto done;
} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
- hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s",
- current->pid, current->comm, buf);
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
+ current->pid, current->comm,
+ cpumask_pr_args(proc_mask));
goto done;
}
cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
&dd->affinity->rcv_intr.mask :
&dd->affinity->rcv_intr.used));
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs));
- hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf);
+ hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
+ cpumask_pr_args(intrs));
/*
* If we don't have a NUMA node requested, preference is towards
if (node == -1)
node = dd->node;
node_mask = cpumask_of_node(node);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask));
- hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf);
+ hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
+ cpumask_pr_args(node_mask));
/* diff will hold all unused cpus */
cpumask_andnot(diff, &set->mask, &set->used);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff));
- hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
+ hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));
/* get cpumask of available CPUs on preferred NUMA */
cpumask_and(mask, diff, node_mask);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
- hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
+ hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
/*
* At first, we don't want to place processes on the same
cpumask_andnot(diff, &set->mask, &set->used);
cpumask_andnot(mask, diff, node_mask);
}
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
- hfi1_cdbg(PROC, "possible CPUs for process %s", buf);
+ hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
+ cpumask_pr_args(mask));
cpu = cpumask_first(mask);
if (cpu >= nr_cpu_ids) /* empty */
static void dc_start(struct hfi1_devdata *);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
unsigned int *np);
-static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
/*
* Error interrupt table entry. This is used as input to the interrupt
}
reset_neighbor_info(ppd);
- if (ppd->mgmt_allowed)
- remove_full_mgmt_pkey(ppd);
/* disable the port */
clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
__func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
ppd->pkeys[2] = FULL_MGMT_P_KEY;
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+ hfi1_event_pkey_change(ppd->dd, ppd->port);
}
-static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
- ppd->pkeys[2] = 0;
- (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+ if (ppd->pkeys[2] != 0) {
+ ppd->pkeys[2] = 0;
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+ hfi1_event_pkey_change(ppd->dd, ppd->port);
+ }
}
/*
* save first 2 flits in the packet that caused
* the error
*/
- dd->err_info_rcvport.packet_flit1 = hdr0;
- dd->err_info_rcvport.packet_flit2 = hdr1;
+ dd->err_info_rcvport.packet_flit1 = hdr0;
+ dd->err_info_rcvport.packet_flit2 = hdr1;
}
switch (info) {
case 1:
return 0;
}
+ /*
+ * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
+ * pkey table can be configured properly if the HFI unit is connected
+ * to a switch port with MgmtAllowed=NO.
+ */
+ clear_full_mgmt_pkey(ppd);
+
return set_link_state(ppd, HLS_DN_POLL);
}
u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
& SEND_LEN_CHECK1_LEN_VL15_MASK) <<
SEND_LEN_CHECK1_LEN_VL15_SHIFT;
- int i;
+ int i, j;
u32 thres;
for (i = 0; i < ppd->vls_supported; i++) {
sc_mtu_to_threshold(dd->vld[i].sc,
dd->vld[i].mtu,
dd->rcd[0]->rcvhdrqentsize));
- sc_set_cr_threshold(dd->vld[i].sc, thres);
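+ /* apply the threshold to each kernel send context serving this VL */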
+ for (j = 0; j < INIT_SC_PER_VL; j++)
+ sc_set_cr_threshold(
+ pio_select_send_context_vl(dd, j, i),
+ thres);
}
thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
sc_mtu_to_threshold(dd->vld[15].sc,
hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
}
-mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
+ mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
#define C_MAX_NAME 13 /* 12 chars + one for \0 */
{
unsigned long flags;
struct hfi1_devdata *tmp, *peer = NULL;
+ struct hfi1_asic_data *asic_data;
int ret = 0;
+ /* pre-allocate the asic structure in case we are the first device */
+ asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
+ if (!asic_data)
+ return -ENOMEM;
+
spin_lock_irqsave(&hfi1_devs_lock, flags);
/* Find our peer device */
list_for_each_entry(tmp, &hfi1_dev_list, list) {
}
if (peer) {
+ /* use already allocated structure */
dd->asic_data = peer->asic_data;
+ kfree(asic_data);
} else {
- dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
- if (!dd->asic_data) {
- ret = -ENOMEM;
- goto done;
- }
+ dd->asic_data = asic_data;
mutex_init(&dd->asic_data->asic_resource_mutex);
}
dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
-
-done:
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
return ret;
}
switch (cmd) {
case HFI1_IOCTL_ASSIGN_CTXT:
+ if (uctxt)
+ return -EINVAL;
+
if (copy_from_user(&uinfo,
(struct hfi1_user_info __user *)arg,
sizeof(uinfo)))
dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
(void *)dd->rcvhdrtail_dummy_kvaddr,
dd->rcvhdrtail_dummy_physaddr);
- dd->rcvhdrtail_dummy_kvaddr = NULL;
+ dd->rcvhdrtail_dummy_kvaddr = NULL;
}
for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret = 0, j, pidx, initfail;
- struct hfi1_devdata *dd = NULL;
+ struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
struct hfi1_pportdata *ppd;
/* First, lock the non-writable module parameters */
memset(data, 0, size);
}
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
+{
+ struct ib_event event;
+
+ event.event = IB_EVENT_PKEY_CHANGE;
+ event.device = &dd->verbs_dev.rdi.ibdev;
+ event.element.port_num = port;
+ ib_dispatch_event(&event);
+}
+
static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
{
struct ib_mad_send_buf *send_buf;
}
if (changed) {
- struct ib_event event;
-
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
-
- event.event = IB_EVENT_PKEY_CHANGE;
- event.device = &dd->verbs_dev.rdi.ibdev;
- event.element.port_num = port;
- ib_dispatch_event(&event);
+ hfi1_event_pkey_change(dd, port);
}
+
return 0;
}
COUNTER_MASK(1, 3) | \
COUNTER_MASK(1, 4))
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
+
#endif /* _HFI1_MAD_H */
/* counter is reset if occupancy count changes */
if (reg != reg_prev)
loop = 0;
- if (loop > 500) {
+ if (loop > 50000) {
/* timed out - bounce the link */
dd_dev_err(dd,
"%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
pio_map_free(m);
}
+/*
+ * Set credit return threshold for the kernel send context
+ */
+static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
+{
+ u32 thres;
+
+ thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
+ 50),
+ sc_mtu_to_threshold(dd->kernel_send_context[scontext],
+ dd->vld[i].mtu,
+ dd->rcd[0]->rcvhdrqentsize));
+ sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
+}
+
/*
* pio_map_init - called when #vls change
* @dd: hfi1_devdata
if (!newmap->map[i])
goto bail;
newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
- /* assign send contexts */
+ /*
+ * assign send contexts and
+ * adjust credit return threshold
+ */
for (j = 0; j < sz; j++) {
- if (dd->kernel_send_context[scontext])
+ if (dd->kernel_send_context[scontext]) {
newmap->map[i]->ksc[j] =
dd->kernel_send_context[scontext];
+ set_threshold(dd, scontext, i);
+ }
if (++scontext >= first_scontext +
vl_scontexts[i])
/* wrap back to first send context */
if (ppd->qsfp_info.cache_valid) {
if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
- sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);
+ snprintf(lenstr, sizeof(lenstr), "%dM ",
+ cache[QSFP_MOD_LEN_OFFS]);
power_byte = cache[QSFP_MOD_PWR_OFFS];
sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
return ret;
}
-const char *print_u64_array(
- struct trace_seq *p,
- u64 *arr, int len)
-{
- int i;
- const char *ret = trace_seq_buffer_ptr(p);
-
- for (i = 0; i < len; i++)
- trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]);
- trace_seq_putc(p, 0);
- return ret;
-}
-
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
__hfi1_trace_fn(SDMA);
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
bool has_grh = rcv_flags & HFI1_HAS_GRH;
- bool sc4_bit = has_sc4_bit(packet);
- u8 sc;
+ u8 sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
u32 bth1;
int is_mcast;
struct ib_grh *grh = NULL;
*/
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- u8 sl, sc5;
+ u8 sl;
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
sl = ibp->sc_to_sl[sc5];
process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
u16 slid = be16_to_cpu(hdr->lrh[3]);
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
}
if (qp->ibqp.qp_num > 1) {
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u16 slid;
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
slid = be16_to_cpu(hdr->lrh[3]);
if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
/* Received on QP0, and so by definition, this is an SMP */
struct opa_smp *smp = (struct opa_smp *)data;
u16 slid = be16_to_cpu(hdr->lrh[3]);
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
goto drop;
}
wc.slid = be16_to_cpu(hdr->lrh[3]);
- sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc |= sc4_bit;
- wc.sl = ibp->sc_to_sl[sc];
+ wc.sl = ibp->sc_to_sl[sc5];
/*
* Save the LMC lower bits if the destination LID is a unicast LID.
struct sdma_mmu_node *node;
};
-#define SDMA_CACHE_NODE_EVICT BIT(0)
+#define SDMA_CACHE_NODE_EVICT 0
struct sdma_mmu_node {
struct mmu_rb_node rb;
*/
SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
req->tidoffset, req->tidoffset / req->omfactor,
- !!(req->omfactor - KDETH_OM_SMALL));
+ req->omfactor != KDETH_OM_SMALL);
KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
req->tidoffset / req->omfactor);
KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
- !!(req->omfactor - KDETH_OM_SMALL));
+ req->omfactor != KDETH_OM_SMALL);
}
done:
trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
{
struct verbs_txreq *tx = ERR_PTR(-EBUSY);
- unsigned long flags;
- spin_lock_irqsave(&qp->s_lock, flags);
write_seqlock(&dev->iowait_lock);
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
struct hfi1_qp_priv *priv;
}
out:
write_sequnlock(&dev->iowait_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
return tx;
}
static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
{
struct verbs_txreq *tx;
struct hfi1_qp_priv *priv = qp->priv;
#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
#define IW_CFG_FPM_QP_COUNT 32768
+#define I40IW_MAX_PAGES_PER_FMR 512
+#define I40IW_MIN_PAGES_PER_FMR 1
#define I40IW_MTU_TO_MSS 40
#define I40IW_DEFAULT_MSS 1460
cqp_init_info.scratch_array = cqp->scratch_array;
status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
if (status) {
- i40iw_pr_err("cqp init status %d maj_err %d min_err %d\n",
- status, maj_err, min_err);
+ i40iw_pr_err("cqp init status %d\n", status);
goto exit;
}
status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
props->max_qp_init_rd_atom = props->max_qp_rd_atom;
props->atomic_cap = IB_ATOMIC_NONE;
props->max_map_per_fmr = 1;
+ props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
return 0;
}
info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
info->pd_id = iwpd->sc_pd.pd_id;
info->total_len = iwmr->length;
+ info->remote_access = true;
cqp_info->cqp_cmd = OP_ALLOC_STAG;
cqp_info->post_sq = 1;
cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
mutex_lock(&iwdev->pbl_mutex);
status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
mutex_unlock(&iwdev->pbl_mutex);
- if (!status)
+ if (status)
goto err1;
if (palloc->level != I40IW_LEVEL_1)
struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
struct i40iw_fast_reg_stag_info info;
+ memset(&info, 0, sizeof(info));
info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
info.access_rights |= i40iw_get_user_access(flags);
info.stag_key = reg_wr(ib_wr)->key & 0xff;
info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
info.total_len = iwmr->ibmr.length;
+ info.reg_addr_pa = *(u64 *)palloc->level1.addr;
info.first_pm_pbl_index = palloc->level1.idx;
info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
+ if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
+ info.chunk_size = 1;
+
if (page_shift == 21)
info.page_size = 1; /* 2M page */
{
struct i40iw_cq *iwcq;
struct i40iw_cq_uk *ukcq;
- enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED;
+ unsigned long flags;
+ enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
iwcq = (struct i40iw_cq *)ibcq;
ukcq = &iwcq->sc_cq.cq_uk;
- if (notify_flags == IB_CQ_NEXT_COMP)
- cq_notify = IW_CQ_COMPL_EVENT;
+ if (notify_flags == IB_CQ_SOLICITED)
+ cq_notify = IW_CQ_COMPL_SOLICITED;
+ spin_lock_irqsave(&iwcq->lock, flags);
ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
+ spin_unlock_irqrestore(&iwcq->lock, flags);
return 0;
}
ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
ah->av.ib.g_slid = ah_attr->src_path_bits;
+ ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
if (ah_attr->ah_flags & IB_AH_GRH) {
ah->av.ib.g_slid |= 0x80;
ah->av.ib.gid_index = ah_attr->grh.sgid_index;
!(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
--ah->av.ib.stat_rate;
}
- ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
return &ah->ibah;
}
tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
spin_unlock(&tun_qp->tx_lock);
if (ret)
- goto out;
+ goto end;
tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
if (tun_qp->tx_ring[tun_tx_ix].ah)
wr.wr.send_flags = IB_SEND_SIGNALED;
ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
-out:
- if (ret)
- ib_destroy_ah(ah);
+ if (!ret)
+ return 0;
+ out:
+ spin_lock(&tun_qp->tx_lock);
+ tun_qp->tx_ix_tail++;
+ spin_unlock(&tun_qp->tx_lock);
+ tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+ ib_destroy_ah(ah);
return ret;
}
ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
+ if (!ret)
+ return 0;
+
+ spin_lock(&sqp->tx_lock);
+ sqp->tx_ix_tail++;
+ spin_unlock(&sqp->tx_lock);
+ sqp->tx_ring[wire_tx_ix].ah = NULL;
out:
- if (ret)
- ib_destroy_ah(ah);
+ ib_destroy_ah(ah);
return ret;
}
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
else
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
- if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
- props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
}
+ if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+ props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
int is_bonded = mlx4_is_bonded(dev);
+ if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
+ return ERR_PTR(-EINVAL);
+
if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
(flow_attr->type != IB_FLOW_ATTR_NORMAL))
return ERR_PTR(-EOPNOTSUPP);
u32 max_pages;
struct mlx4_mr mmr;
struct ib_umem *umem;
- void *pages_alloc;
+ size_t page_map_size;
};
struct mlx4_ib_mw {
struct mlx4_ib_mr *mr,
int max_pages)
{
- int size = max_pages * sizeof(u64);
- int add_size;
int ret;
- add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+ /* Ensure that size is aligned to DMA cacheline
+ * requirements.
+ * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+ * so page_map_size will never cross PAGE_SIZE.
+ */
+ mr->page_map_size = roundup(max_pages * sizeof(u64),
+ MLX4_MR_PAGES_ALIGN);
- mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
- if (!mr->pages_alloc)
+ /* Prevent cross page boundary allocation. */
+ mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+ if (!mr->pages)
return -ENOMEM;
- mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
mr->page_map = dma_map_single(device->dma_device, mr->pages,
- size, DMA_TO_DEVICE);
+ mr->page_map_size, DMA_TO_DEVICE);
if (dma_mapping_error(device->dma_device, mr->page_map)) {
ret = -ENOMEM;
}
return 0;
-err:
- kfree(mr->pages_alloc);
+err:
+ free_page((unsigned long)mr->pages);
return ret;
}
{
if (mr->pages) {
struct ib_device *device = mr->ibmr.device;
- int size = mr->max_pages * sizeof(u64);
dma_unmap_single(device->dma_device, mr->page_map,
- size, DMA_TO_DEVICE);
- kfree(mr->pages_alloc);
+ mr->page_map_size, DMA_TO_DEVICE);
+ free_page((unsigned long)mr->pages);
mr->pages = NULL;
}
}
mr->npages = 0;
ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
- sizeof(u64) * mr->max_pages,
- DMA_TO_DEVICE);
+ mr->page_map_size, DMA_TO_DEVICE);
rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
- sizeof(u64) * mr->max_pages,
- DMA_TO_DEVICE);
+ mr->page_map_size, DMA_TO_DEVICE);
return rc;
}
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_RC:
return sizeof (struct mlx4_wqe_ctrl_seg) +
- sizeof (struct mlx4_wqe_atomic_seg) +
+ sizeof (struct mlx4_wqe_masked_atomic_seg) +
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_SMI:
case MLX4_IB_QPT_GSI:
{
err = create_qp_common(to_mdev(pd->device), pd, init_attr,
udata, 0, &qp, gfp);
- if (err)
+ if (err) {
+ kfree(qp);
return ERR_PTR(err);
+ }
qp->ibqp.qp_num = qp->mqp.qpn;
qp->xrcdn = xrcdn;
int eqn;
int err;
- if (entries < 0)
+ if (entries < 0 ||
+ (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
return ERR_PTR(-EINVAL);
if (check_cq_create_flags(attr->flags))
return -ENOSYS;
}
- if (entries < 1)
+ if (entries < 1 ||
+ entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+ mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
+ entries,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
return -EINVAL;
+ }
entries = roundup_pow_of_two(entries + 1);
- if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
+ if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
return -EINVAL;
if (entries == ibcq->cqe + 1)
pma_cnt_ext->port_xmit_data =
cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
transmitted_ib_multicast.octets) >> 2);
- pma_cnt_ext->port_xmit_data =
+ pma_cnt_ext->port_rcv_data =
cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
received_ib_multicast.octets) >> 2);
pma_cnt_ext->port_xmit_packets =
MLX5_CAP_ETH(dev->mdev, scatter_fcs))
props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
+ if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
+ props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
- resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+ if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+ resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
resp.cache_line_size = L1_CACHE_BYTES;
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
if (field_avail(typeof(resp), cqe_version, udata->outlen))
resp.response_length += sizeof(resp.cqe_version);
- if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+ /*
+ * We don't want to expose information from the PCI bar that is located
+ * after 4096 bytes, so if the arch only supports larger pages, let's
+ * pretend we don't support reading the HCA's core clock. This is also
+ * forced by mmap function.
+ */
+ if (PAGE_SIZE <= 4096 &&
+ field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
resp.comp_mask |=
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
resp.hca_core_clock_offset =
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+ return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}
break;
case MLX5_DEV_EVENT_PORT_DOWN:
+ case MLX5_DEV_EVENT_PORT_INITIALIZED:
ibev.event = IB_EVENT_PORT_ERR;
port = (u8)param;
break;
- case MLX5_DEV_EVENT_PORT_INITIALIZED:
- /* not used by ULPs */
- return;
-
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
port = (u8)param;
qp->rq.max_gs = 0;
qp->rq.wqe_cnt = 0;
qp->rq.wqe_shift = 0;
+ cap->max_recv_wr = 0;
+ cap->max_recv_sge = 0;
} else {
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
const struct ib_ah_attr *ah,
struct mlx5_qp_path *path, u8 port, int attr_mask,
- u32 path_flags, const struct ib_qp_attr *attr)
+ u32 path_flags, const struct ib_qp_attr *attr,
+ bool alt)
{
enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
int err;
if (attr_mask & IB_QP_PKEY_INDEX)
- path->pkey_index = attr->pkey_index;
+ path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
+ attr->pkey_index);
if (ah->ah_flags & IB_AH_GRH) {
if (ah->grh.sgid_index >=
ah->grh.sgid_index);
path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
} else {
- path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
- path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
- 0;
+ path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+ path->fl_free_ar |=
+ (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
path->rlid = cpu_to_be16(ah->dlid);
path->grh_mlid = ah->src_path_bits & 0x7f;
if (ah->ah_flags & IB_AH_GRH)
path->port = port;
if (attr_mask & IB_QP_TIMEOUT)
- path->ackto_lt = attr->timeout << 3;
+ path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
if (attr_mask & IB_QP_PKEY_INDEX)
- context->pri_path.pkey_index = attr->pkey_index;
+ context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
/* todo implement counter_index functionality */
if (attr_mask & IB_QP_AV) {
err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
- attr_mask, 0, attr);
+ attr_mask, 0, attr, false);
if (err)
goto out;
}
if (attr_mask & IB_QP_ALT_PATH) {
err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
&context->alt_path,
- attr->alt_port_num, attr_mask, 0, attr);
+ attr->alt_port_num,
+ attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
+ 0, attr, true);
if (err)
goto out;
}
return MLX5_FENCE_MODE_SMALL_AND_FENCE;
else
return fence;
-
- } else {
- return 0;
+ } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
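+ /* an explicit IB_SEND_FENCE request still gets a fence */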
+ return MLX5_FENCE_MODE_FENCE;
}
+
+ return 0;
}
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
- qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
+ qp_attr->alt_pkey_index =
+ be16_to_cpu(context->alt_path.pkey_index);
qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
}
- qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
+ qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
qp_attr->port_num = context->pri_path.port;
/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
if (!ibqp->uobject) {
- qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_wr = qp->sq.max_post;
qp_attr->cap.max_send_sge = qp->sq.max_gs;
+ qp_init_attr->qp_context = ibqp->qp_context;
} else {
qp_attr->cap.max_send_wr = 0;
qp_attr->cap.max_send_sge = 0;
}
- /* We don't support inline sends for kernel QPs (yet), and we
- * don't know what userspace's value should be.
- */
- qp_attr->cap.max_inline_data = 0;
+ qp_init_attr->qp_type = ibqp->qp_type;
+ qp_init_attr->recv_cq = ibqp->recv_cq;
+ qp_init_attr->send_cq = ibqp->send_cq;
+ qp_init_attr->srq = ibqp->srq;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
qp_init_attr->cap = qp_attr->cap;
switch (cmd.type) {
case QIB_CMD_ASSIGN_CTXT:
+ if (rcd) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
if (ret)
goto bail;
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
int i;
int flags;
dma_addr_t pa;
- DEFINE_DMA_ATTRS(attrs);
-
- if (dmasync)
- dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
if (!can_do_mlock())
return -EPERM;
/* wrap to first map page, invert bit 0 */
offset = qpt->incr | ((offset & 1) ^ 1);
}
- /* there can be no bits at shift and below */
- WARN_ON(offset & (rdi->dparms.qos_shift - 1));
+ /* there can be no set bits in low-order QoS bits */
+ WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
qpn = mk_qpn(qpt, map, offset);
}
*/
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
enum ib_qp_type type)
+ __releases(&qp->s_lock)
+ __releases(&qp->s_hlock)
+ __releases(&qp->r_lock)
+ __acquires(&qp->r_lock)
+ __acquires(&qp->s_hlock)
+ __acquires(&qp->s_lock)
{
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;
qp->s_ssn = 1;
qp->s_lsn = 0;
qp->s_mig_state = IB_MIG_MIGRATED;
- if (qp->s_ack_queue)
- memset(
- qp->s_ack_queue,
- 0,
- rvt_max_atomic(rdi) *
- sizeof(*qp->s_ack_queue));
qp->r_head_ack_queue = 0;
qp->s_tail_ack_queue = 0;
qp->s_num_rd_atomic = 0;
* initialization that is needed.
*/
priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
- if (!priv)
+ if (IS_ERR(priv)) {
+ ret = priv;
goto bail_qp;
+ }
qp->priv = priv;
qp->timeout_jiffies =
usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
!rdi->driver_f.quiesce_qp ||
!rdi->driver_f.notify_error_qp ||
!rdi->driver_f.mtu_from_qp ||
- !rdi->driver_f.mtu_to_path_mtu ||
- !rdi->driver_f.shut_down_port ||
- !rdi->driver_f.cap_mask_chg)
+ !rdi->driver_f.mtu_to_path_mtu)
return -EINVAL;
break;
IPOIB_NEIGH_TBL_FLUSH = 12,
IPOIB_FLAG_DEV_ADDR_SET = 13,
IPOIB_FLAG_DEV_ADDR_CTRL = 14,
+ IPOIB_FLAG_GOING_DOWN = 15,
IPOIB_MAX_BACKOFF_SECONDS = 16,
{
struct net_device *dev = to_net_dev(d);
int ret;
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
+ return -EPERM;
if (!rtnl_trylock())
return restart_syscall();
if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
return false;
- netif_addr_lock(priv->dev);
+ netif_addr_lock_bh(priv->dev);
/* The subnet prefix may have changed, update it now so we won't have
* to do it later
search_gid.global.interface_id = priv->local_gid.global.interface_id;
- netif_addr_unlock(priv->dev);
+ netif_addr_unlock_bh(priv->dev);
err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
priv->dev, &port, &index);
- netif_addr_lock(priv->dev);
+ netif_addr_lock_bh(priv->dev);
if (search_gid.global.interface_id !=
priv->local_gid.global.interface_id)
}
out:
- netif_addr_unlock(priv->dev);
+ netif_addr_unlock_bh(priv->dev);
return ret;
}
neigh = NULL;
goto out_unlock;
}
- neigh->alive = jiffies;
+
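+ /* refresh liveness only while the neigh queue can accept more packets */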
+ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+ neigh->alive = jiffies;
goto out_unlock;
}
}
struct ipoib_dev_priv *child_priv;
struct net_device *netdev = priv->dev;
- netif_addr_lock(netdev);
+ netif_addr_lock_bh(netdev);
memcpy(&priv->local_gid.global.interface_id,
&gid->global.interface_id,
memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
- netif_addr_unlock(netdev);
+ netif_addr_unlock_bh(netdev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
down_read(&priv->vlan_rwsem);
union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
int ret = 0;
- netif_addr_lock(dev);
+ netif_addr_lock_bh(dev);
/* Make sure the QPN, reserved and subnet prefix match the current
* lladdr, it also makes sure the lladdr is unicast.
gid->global.interface_id == 0)
ret = -EINVAL;
- netif_addr_unlock(dev);
+ netif_addr_unlock_bh(dev);
return ret;
}
ib_unregister_event_handler(&priv->event_handler);
flush_workqueue(ipoib_workqueue);
+ /* mark interface in the middle of destruction */
+ set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
+
rtnl_lock();
dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
rtnl_unlock();
return;
}
priv->local_lid = port_attr.lid;
- netif_addr_lock(dev);
+ netif_addr_lock_bh(dev);
if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
- netif_addr_unlock(dev);
+ netif_addr_unlock_bh(dev);
return;
}
- netif_addr_unlock(dev);
+ netif_addr_unlock_bh(dev);
spin_lock_irq(&priv->lock);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
ppriv = netdev_priv(pdev);
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+ return -EPERM;
+
snprintf(intf_name, sizeof intf_name, "%s.%04x",
ppriv->dev->name, pkey);
priv = ipoib_intf_alloc(intf_name);
ppriv = netdev_priv(pdev);
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+ return -EPERM;
+
if (!rtnl_trylock())
return restart_syscall();
{
unsigned int sg_offset = 0;
- state->desc = req->indirect_desc;
state->fr.next = req->fr_list;
state->fr.end = req->fr_list + ch->target->mr_per_cmd;
state->sg = scat;
struct scatterlist *sg;
int i;
- state->desc = req->indirect_desc;
for_each_sg(scat, sg, count, i) {
srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
ib_sg_dma_len(dev->dev, sg),
target->indirect_size, DMA_TO_DEVICE);
memset(&state, 0, sizeof(state));
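+ /* initialize the descriptor pointer once here instead of in each mapping path */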
+ state.desc = req->indirect_desc;
if (dev->use_fast_reg)
ret = srp_map_sg_fr(&state, ch, req, scat, count);
else if (dev->use_fmr)
int mr_page_shift, p;
u64 max_pages_per_mr;
- srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
+ srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
if (!srp_dev)
return;
IB_ACCESS_REMOTE_WRITE);
if (IS_ERR(srp_dev->global_mr))
goto err_pd;
- } else {
- srp_dev->global_mr = NULL;
}
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
*/
qp_init->cap.max_send_wr = srp_sq_size / 2;
qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
- qp_init->cap.max_send_sge = max(sdev->device->attrs.max_sge_rd,
- sdev->device->attrs.max_sge);
+ qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
qp_init->port_num = ch->sport->port;
ch->qp = ib_create_qp(sdev->pd, qp_init);
SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
SRPT_DEF_SG_TABLESIZE = 128,
+ SRPT_DEF_SG_PER_WQE = 16,
MIN_SRPT_SQ_SIZE = 16,
DEF_SRPT_SQ_SIZE = 4096,
case XTYPE_XBOXONE:
packet->data[0] = 0x09; /* activate rumble */
- packet->data[1] = 0x08;
+ packet->data[1] = 0x00;
packet->data[2] = xpad->odata_serial++;
- packet->data[3] = 0x08; /* continuous effect */
- packet->data[4] = 0x00; /* simple rumble mode */
- packet->data[5] = 0x03; /* L and R actuator only */
- packet->data[6] = 0x00; /* TODO: LT actuator */
- packet->data[7] = 0x00; /* TODO: RT actuator */
+ packet->data[3] = 0x09;
+ packet->data[4] = 0x00;
+ packet->data[5] = 0x0F;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
packet->data[8] = strong / 512; /* left actuator */
packet->data[9] = weak / 512; /* right actuator */
- packet->data[10] = 0x80; /* length of pulse */
- packet->data[11] = 0x00; /* stop period of pulse */
+ packet->data[10] = 0xFF;
+ packet->data[11] = 0x00;
packet->data[12] = 0x00;
packet->len = 13;
packet->pending = true;
int ep_irq_in_idx;
int i, error;
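+ /* the rest of probe assumes two endpoints (in and out); reject anything else */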
+ if (intf->cur_altsetting->desc.bNumEndpoints != 2)
+ return -ENODEV;
+
for (i = 0; xpad_device[i].idVendor; i++) {
if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
(le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
break;
}
- if (xpad_device[i].xtype == XTYPE_XBOXONE &&
- intf->cur_altsetting->desc.bInterfaceNumber != 0) {
- /*
- * The Xbox One controller lists three interfaces all with the
- * same interface class, subclass and protocol. Differentiate by
- * interface number.
- */
- return -ENODEV;
- }
-
xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
if (!xpad)
return -ENOMEM;
if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
xpad->xtype = XTYPE_XBOX360W;
+ else if (intf->cur_altsetting->desc.bInterfaceProtocol == 208)
+ xpad->xtype = XTYPE_XBOXONE;
else
xpad->xtype = XTYPE_XBOX360;
} else {
xpad->mapping |= MAP_STICKS_TO_NULL;
}
+ if (xpad->xtype == XTYPE_XBOXONE &&
+ intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+ /*
+ * The Xbox One controller lists three interfaces all with the
+ * same interface class, subclass and protocol. Differentiate by
+ * interface number.
+ */
+ error = -ENODEV;
+ goto err_free_in_urb;
+ }
+
error = xpad_init_output(intf, xpad);
if (error)
goto err_free_in_urb;
case 5:
etd->hw_version = 3;
break;
- case 6:
- case 7:
- case 8:
- case 9:
- case 10:
- case 13:
- case 14:
+ case 6 ... 14:
etd->hw_version = 4;
break;
default:
return -ENXIO;
}
- if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
- psmouse_dbg(psmouse, "VMMouse port in use.\n");
- return -EBUSY;
- }
-
/* Check if the device is present */
response = ~VMMOUSE_PROTO_MAGIC;
VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
- if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) {
- release_region(VMMOUSE_PROTO_PORT, 4);
+ if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU)
return -ENXIO;
- }
if (set_properties) {
psmouse->vendor = VMMOUSE_VENDOR;
psmouse->model = version;
}
- release_region(VMMOUSE_PROTO_PORT, 4);
-
return 0;
}
psmouse_reset(psmouse);
input_unregister_device(priv->abs_dev);
kfree(priv);
- release_region(VMMOUSE_PROTO_PORT, 4);
}
/**
struct input_dev *rel_dev = psmouse->dev, *abs_dev;
int error;
- if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
- psmouse_dbg(psmouse, "VMMouse port in use.\n");
- return -EBUSY;
- }
-
psmouse_reset(psmouse);
error = vmmouse_enable(psmouse);
if (error)
- goto release_region;
+ return error;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
abs_dev = input_allocate_device();
kfree(priv);
psmouse->private = NULL;
-release_region:
- release_region(VMMOUSE_PROTO_PORT, 4);
-
return error;
}
static void rmi_function_of_probe(struct rmi_function *fn)
{
char of_name[9];
+ struct device_node *node = fn->rmi_dev->xport->dev->of_node;
snprintf(of_name, sizeof(of_name), "rmi4-f%02x",
fn->fd.function_number);
- fn->dev.of_node = of_find_node_by_name(
- fn->rmi_dev->xport->dev->of_node, of_name);
+ fn->dev.of_node = of_get_child_by_name(node, of_name);
}
#else
static inline void rmi_function_of_probe(struct rmi_function *fn)
struct rmi_device *rmi_dev = fn->rmi_dev;
int ret;
int offset;
- u8 buf[14];
+ u8 buf[15];
int pitch_x = 0;
int pitch_y = 0;
int clip_x_low = 0;
offset = rmi_register_desc_calc_reg_offset(&f12->control_reg_desc, 8);
- if (item->reg_size > 14) {
- dev_err(&fn->dev, "F12 control8 should be 14 bytes, not: %ld\n",
- item->reg_size);
+ if (item->reg_size > sizeof(buf)) {
+ dev_err(&fn->dev,
+ "F12 control8 should be no bigger than %zd bytes, not: %ld\n",
+ sizeof(buf), item->reg_size);
return -ENODEV;
}
return -ENODEV;
}
+ ts->regmap = syscon_node_to_regmap(syscon_np);
+ of_node_put(syscon_np);
+ if (IS_ERR(ts->regmap)) {
+ dev_err(dev, "cannot get parent's regmap\n");
+ return PTR_ERR(ts->regmap);
+ }
+
error = of_property_read_u32_index(np, "syscon", 1, &reg);
if (error < 0) {
dev_err(dev, "no offset in syscon\n");
ts->bit = BIT(bit);
- ts->regmap = syscon_node_to_regmap(syscon_np);
- if (IS_ERR(ts->regmap)) {
- dev_err(dev, "cannot get parent's regmap\n");
- return PTR_ERR(ts->regmap);
- }
-
return 0;
}
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2004_input_id = {
+ .bustype = BUS_I2C,
+ .product = 2004,
+};
+
static int tsc2004_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
const struct i2c_device_id *id)
{
- return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C,
+ return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id,
devm_regmap_init_i2c(i2c, &tsc200x_regmap_config),
tsc2004_cmd);
}
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2005_input_id = {
+ .bustype = BUS_SPI,
+ .product = 2005,
+};
+
static int tsc2005_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
if (error)
return error;
- return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI,
+ return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id,
devm_regmap_init_spi(spi, &tsc200x_regmap_config),
tsc2005_cmd);
}
mutex_unlock(&ts->mutex);
}
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd))
{
snprintf(ts->phys, sizeof(ts->phys),
"%s/input-ts", dev_name(dev));
- input_dev->name = "TSC200X touchscreen";
+ if (tsc_id->product == 2004) {
+ input_dev->name = "TSC200X touchscreen";
+ } else {
+ input_dev->name = devm_kasprintf(dev, GFP_KERNEL,
+ "TSC%04d touchscreen",
+ tsc_id->product);
+ if (!input_dev->name)
+ return -ENOMEM;
+ }
+
input_dev->phys = ts->phys;
- input_dev->id.bustype = bustype;
+ input_dev->id = *tsc_id;
input_dev->dev.parent = dev;
input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
extern const struct regmap_config tsc200x_regmap_config;
extern const struct dev_pm_ops tsc200x_pm_ops;
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd));
int tsc200x_remove(struct device *dev);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-#define W8001_MAX_LENGTH 11
+#define W8001_MAX_LENGTH 13
#define W8001_LEAD_MASK 0x80
#define W8001_LEAD_BYTE 0x80
#define W8001_TAB_MASK 0x40
bool touch = data[0] & (1 << i);
input_mt_slot(dev, i);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
if (touch) {
x = (data[6 * i + 1] << 7) | data[6 * i + 2];
y = (data[6 * i + 3] << 7) | data[6 * i + 4];
w8001->idx = 0;
parse_multi_touch(w8001);
break;
+
+ default:
+ /*
+ * ThinkPad X60 Tablet PC (pen only device) sometimes
+ * sends invalid data packets that are larger than
+ * W8001_PKTLEN_TPCPEN. Let's start over again.
+ */
+ if (!w8001->touch_dev && w8001->idx > W8001_PKTLEN_TPCPEN - 1)
+ w8001->idx = 0;
}
return IRQ_HANDLED;
0, touch.x, 0, 0);
input_set_abs_params(dev, ABS_MT_POSITION_Y,
0, touch.y, 0, 0);
+ input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
+ 0, MT_TOOL_MAX, 0, 0);
strlcat(basename, " 2FG", basename_sz);
if (w8001->max_pen_x && w8001->max_pen_y)
break;
}
+ devid = e->devid;
DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
hid, uid,
PCI_BUS_NUM(devid),
PCI_SLOT(devid),
PCI_FUNC(devid));
- devid = e->devid;
flags = e->flags;
ret = add_acpi_hid_device(hid, uid, &devid, false);
break;
}
+ /*
+ * Order is important here to make sure any unity map requirements are
+ * fulfilled. The unity mappings are created and written to the device
+ * table during the amd_iommu_init_api() call.
+ *
+ * After that we call init_device_table_dma() to make sure any
+ * uninitialized DTE will block DMA, and in the end we flush the caches
+ * of all IOMMUs to make sure the changes to the device table are
+ * active.
+ */
+ ret = amd_iommu_init_api();
+
init_device_table_dma();
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
- ret = amd_iommu_init_api();
-
if (!ret)
print_iommu_info();
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
+ .map_sg = default_iommu_map_sg,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
}
}
- iommu_flush_write_buffer(iommu);
- iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-
if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0;
#ifdef CONFIG_INTEL_IOMMU_SVM
#endif
}
+ /*
+ * Now that qi is enabled on all iommus, set the root entry and flush
+ * caches. This is required on some Intel X58 chipsets, otherwise the
+ * flush_context function will loop forever and the boot hangs.
+ */
+ for_each_active_iommu(iommu, drhd) {
+ iommu_flush_write_buffer(iommu);
+ iommu_set_root_entry(iommu);
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ }
+
if (iommu_pass_through)
iommu_identity_mapping |= IDENTMAP_ALL;
for (i = 0; i < g_num_of_iommus; i++) {
struct intel_iommu *iommu = g_iommus[i];
struct dmar_domain *domain;
- u16 did;
+ int did;
if (!iommu)
continue;
- for (did = 0; did < 0xffff; did++) {
- domain = get_iommu_domain(iommu, did);
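+ /* only domain IDs below cap_ndoms() are valid on this IOMMU */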
+ for (did = 0; did < cap_ndoms(iommu->cap); did++) {
+ domain = get_iommu_domain(iommu, (u16)did);
if (!domain)
continue;
/* Try replenishing IOVAs by flushing rcache. */
flushed_rcache = true;
+ preempt_disable();
for_each_online_cpu(cpu)
free_cpu_cached_iovas(cpu, iovad);
+ preempt_enable();
goto retry;
}
bool can_insert = false;
unsigned long flags;
- cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
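+ /* get_cpu_ptr() disables preemption, keeping us on this CPU's cache */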
+ cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
spin_lock_irqsave(&cpu_rcache->lock, flags);
if (!iova_magazine_full(cpu_rcache->loaded)) {
iova_magazine_push(cpu_rcache->loaded, iova_pfn);
spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+ put_cpu_ptr(rcache->cpu_rcaches);
if (mag_to_free) {
iova_magazine_free_pfns(mag_to_free, iovad);
bool has_pfn = false;
unsigned long flags;
- cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+ cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
spin_lock_irqsave(&cpu_rcache->lock, flags);
if (!iova_magazine_empty(cpu_rcache->loaded)) {
iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+ put_cpu_ptr(rcache->cpu_rcaches);
return iova_pfn;
}
dte_addr = virt_to_phys(rk_domain->dt);
for (i = 0; i < iommu->num_mmu; i++) {
rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
- rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
}
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
u64 flags;
u32 ite_size;
u32 device_ids;
+ int numa_node;
};
#define ITS_ITT_ALIGN SZ_256
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ unsigned int cpu;
+ const struct cpumask *cpu_mask = cpu_online_mask;
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_collection *target_col;
u32 id = its_get_event_id(d);
+ /* an LPI cannot be routed to a redistributor on a foreign node */
+ if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ if (its_dev->its->numa_node >= 0) {
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+ if (!cpumask_intersects(mask_val, cpu_mask))
+ return -EINVAL;
+ }
+ }
+
+ cpu = cpumask_any_and(mask_val, cpu_mask);
+
if (cpu >= nr_cpu_ids)
return -EINVAL;
list_for_each_entry(its, &its_nodes, entry) {
u64 target;
+ /* avoid cross-node collections and their mappings */
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ struct device_node *cpu_node;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (its->numa_node != NUMA_NO_NODE &&
+ its->numa_node != of_node_to_nid(cpu_node))
+ continue;
+ }
+
/*
* We now have to bind each collection to its target
* redistributor.
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ const struct cpumask *cpu_mask = cpu_online_mask;
+
+ /* get the cpu_mask of the local node */
+ if (its_dev->its->numa_node >= 0)
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
/* Bind the LPI to the first possible CPU */
- its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+ its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
/* Map the GIC IRQ and event to the device */
its_send_mapvi(its_dev, d->hwirq, event);
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
}
+static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+ struct its_node *its = data;
+
+ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
.mask = 0xffff0fff,
.init = its_enable_quirk_cavium_22375,
},
+#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+ {
+ .desc = "ITS: Cavium erratum 23144",
+ .iidr = 0xa100034c, /* ThunderX pass 1.x */
+ .mask = 0xffff0fff,
+ .init = its_enable_quirk_cavium_23144,
+ },
#endif
{
}
its->base = its_base;
its->phys_base = res.start;
its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+ its->numa_node = of_node_to_nid(node);
its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
if (!its->cmd_base) {
while (count--) {
val = readl_relaxed(rbase + GICR_WAKER);
- if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
+ if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
break;
cpu_relax();
udelay(1);
spin_lock_irqsave(&gic_lock, flags);
gic_map_to_pin(intr, gic_cpu_pin);
- gic_map_to_vpe(intr, vpe);
+ gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(intr, pcpu_masks[i].pcpu_mask);
set_bit(intr, pcpu_masks[vpe].pcpu_mask);
/* verify that it doesn't conflict with an IPI irq */
if (test_bit(spec->hwirq, ipi_resrv))
return -EBUSY;
+
+ hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
+
+ return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+ &gic_level_irq_controller,
+ NULL);
} else {
base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
if (base_hwirq == gic_shared_intrs) {
&gic_level_irq_controller,
NULL);
if (ret)
- return ret;
+ goto error;
}
return 0;
+
+error:
+ irq_domain_free_irqs_parent(d, virq, nr_irqs);
+ return ret;
}
void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
switch (bus_token) {
case DOMAIN_BUS_IPI:
is_ipi = d->bus_token == bus_token;
- return to_of_node(d->fwnode) == node && is_ipi;
+ return (!node || to_of_node(d->fwnode) == node) && is_ipi;
break;
default:
return 0;
/* set polarity for external interrupts only */
for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) {
if (priv->ext_irqs[i] == data->hwirq) {
- ret = pic32_set_ext_polarity(i + 1, flow_type);
+ ret = pic32_set_ext_polarity(i, flow_type);
if (ret)
return ret;
}
if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
led_set_brightness_nosleep(led_cdev, LED_OFF);
+ led_cdev->flags &= ~LED_BLINK_SW;
return;
}
if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
- led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+ led_cdev->flags &= ~(LED_BLINK_ONESHOT_STOP | LED_BLINK_SW);
return;
}
return;
}
+ led_cdev->flags |= LED_BLINK_SW;
mod_timer(&led_cdev->blink_timer, jiffies + 1);
}
del_timer_sync(&led_cdev->blink_timer);
led_cdev->blink_delay_on = 0;
led_cdev->blink_delay_off = 0;
+ led_cdev->flags &= ~LED_BLINK_SW;
}
EXPORT_SYMBOL_GPL(led_stop_software_blink);
enum led_brightness brightness)
{
/*
- * In case blinking is on delay brightness setting
+ * If software blink is active, delay brightness setting
* until the next timer tick.
*/
- if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
+ if (led_cdev->flags & LED_BLINK_SW) {
/*
* If we need to disable soft blinking delegate this to the
* work queue task to avoid problems in case we are called
#include <linux/sched.h>
#include <linux/leds.h>
#include <linux/reboot.h>
+#include <linux/suspend.h>
#include "../leds.h"
static int panic_heartbeats;
.deactivate = heartbeat_trig_deactivate,
};
+static int heartbeat_pm_notifier(struct notifier_block *nb,
+ unsigned long pm_event, void *unused)
+{
+ int rc;
+
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ led_trigger_unregister(&heartbeat_led_trigger);
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ rc = led_trigger_register(&heartbeat_led_trigger);
+ if (rc)
+ pr_err("could not re-register heartbeat trigger\n");
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
static int heartbeat_reboot_notifier(struct notifier_block *nb,
unsigned long code, void *unused)
{
return NOTIFY_DONE;
}
+static struct notifier_block heartbeat_pm_nb = {
+ .notifier_call = heartbeat_pm_notifier,
+};
+
static struct notifier_block heartbeat_reboot_nb = {
.notifier_call = heartbeat_reboot_notifier,
};
atomic_notifier_chain_register(&panic_notifier_list,
&heartbeat_panic_nb);
register_reboot_notifier(&heartbeat_reboot_nb);
+ register_pm_notifier(&heartbeat_pm_nb);
}
return rc;
}
static void __exit heartbeat_trig_exit(void)
{
+ unregister_pm_notifier(&heartbeat_pm_nb);
unregister_reboot_notifier(&heartbeat_reboot_nb);
atomic_notifier_chain_unregister(&panic_notifier_list,
&heartbeat_panic_nb);
struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
struct mcb_device *mdev = to_mcb_device(dev);
const struct mcb_device_id *found_id;
+ struct module *carrier_mod;
+ int ret;
found_id = mcb_match_id(mdrv->id_table, mdev);
if (!found_id)
return -ENODEV;
- return mdrv->probe(mdev, found_id);
+ carrier_mod = mdev->dev.parent->driver->owner;
+ if (!try_module_get(carrier_mod))
+ return -EINVAL;
+
+ get_device(dev);
+ ret = mdrv->probe(mdev, found_id);
+ if (ret)
+ module_put(carrier_mod);
+
+ return ret;
}
static int mcb_remove(struct device *dev)
{
struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
struct mcb_device *mdev = to_mcb_device(dev);
+ struct module *carrier_mod;
mdrv->remove(mdev);
+ carrier_mod = mdev->dev.parent->driver->owner;
+ module_put(carrier_mod);
+
put_device(&mdev->dev);
return 0;
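Note: the mcb hunks above pin the carrier driver's module for the lifetime of a bound device: try_module_get() before probe, module_put() on probe failure and again at remove. A minimal sketch of the pattern (do_probe() is a hypothetical hook, not mcb's API):

#include <linux/device.h>
#include <linux/module.h>

int do_probe(struct device *dev);	/* hypothetical bus-specific probe */

static int child_probe(struct device *dev)
{
	struct module *owner = dev->parent->driver->owner;
	int rc;

	if (!try_module_get(owner))	/* carrier may be unloading */
		return -ENODEV;

	rc = do_probe(dev);
	if (rc)
		module_put(owner);	/* drop the pin on failure */
	return rc;
}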
V4L2_DV_BT_CAP_CUSTOM)
};
-static inline const struct v4l2_dv_timings_cap *
-adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd)
+/*
+ * Return the DV timings capabilities for the requested sink pad. As a special
+ * case, pad value -1 returns the capabilities for the currently selected input.
+ */
+static const struct v4l2_dv_timings_cap *
+adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd, int pad)
{
- return is_digital_input(sd) ? &adv76xx_timings_cap_digital :
- &adv7604_timings_cap_analog;
+ if (pad == -1) {
+ struct adv76xx_state *state = to_state(sd);
+
+ pad = state->selected_input;
+ }
+
+ switch (pad) {
+ case ADV76XX_PAD_HDMI_PORT_A:
+ case ADV7604_PAD_HDMI_PORT_B:
+ case ADV7604_PAD_HDMI_PORT_C:
+ case ADV7604_PAD_HDMI_PORT_D:
+ return &adv76xx_timings_cap_digital;
+
+ case ADV7604_PAD_VGA_RGB:
+ case ADV7604_PAD_VGA_COMP:
+ default:
+ return &adv7604_timings_cap_analog;
+ }
}
const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i],
- adv76xx_get_dv_timings_cap(sd),
+ adv76xx_get_dv_timings_cap(sd, -1),
adv76xx_check_dv_timings, NULL))
continue;
if (vtotal(bt) != stdi->lcf + 1)
return -EINVAL;
return v4l2_enum_dv_timings_cap(timings,
- adv76xx_get_dv_timings_cap(sd), adv76xx_check_dv_timings, NULL);
+ adv76xx_get_dv_timings_cap(sd, timings->pad),
+ adv76xx_check_dv_timings, NULL);
}
static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
struct v4l2_dv_timings_cap *cap)
{
struct adv76xx_state *state = to_state(sd);
+ unsigned int pad = cap->pad;
if (cap->pad >= state->source_pad)
return -EINVAL;
- *cap = *adv76xx_get_dv_timings_cap(sd);
+ *cap = *adv76xx_get_dv_timings_cap(sd, pad);
+ cap->pad = pad;
+
return 0;
}
static void adv76xx_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
- v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd),
- is_digital_input(sd) ? 250000 : 1000000,
- adv76xx_check_dv_timings, NULL);
+ v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd, -1),
+ is_digital_input(sd) ? 250000 : 1000000,
+ adv76xx_check_dv_timings, NULL);
}
static unsigned int adv7604_read_hdmi_pixelclock(struct v4l2_subdev *sd)
bt = &timings->bt;
- if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd),
+ if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd, -1),
adv76xx_check_dv_timings, NULL))
return -ERANGE;
if (ret) {
dev_err(s->dev, "Failed to register as video device (%d)\n",
ret);
- goto err_unregister_v4l2_dev;
+ goto err_free_controls;
}
dev_info(s->dev, "Registered as %s\n",
video_device_node_name(&s->vdev));
err_free_controls:
v4l2_ctrl_handler_free(&s->hdl);
-err_unregister_v4l2_dev:
v4l2_device_unregister(&s->v4l2_dev);
err_free_mem:
kfree(s);
static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
const struct uvc_xu_control_mapping32 __user *up)
{
- struct uvc_menu_info __user *umenus;
- struct uvc_menu_info __user *kmenus;
compat_caddr_t p;
if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
if (__get_user(p, &up->menu_info))
return -EFAULT;
- umenus = compat_ptr(p);
- if (!access_ok(VERIFY_READ, umenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
-
- kmenus = compat_alloc_user_space(kp->menu_count * sizeof(*kmenus));
- if (kmenus == NULL)
- return -EFAULT;
- kp->menu_info = kmenus;
-
- if (copy_in_user(kmenus, umenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
+ kp->menu_info = compat_ptr(p);
return 0;
}
static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
struct uvc_xu_control_mapping32 __user *up)
{
- struct uvc_menu_info __user *umenus;
- struct uvc_menu_info __user *kmenus = kp->menu_info;
- compat_caddr_t p;
-
if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
__copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
__put_user(kp->menu_count, &up->menu_count))
if (__clear_user(up->reserved, sizeof(up->reserved)))
return -EFAULT;
- if (kp->menu_count == 0)
- return 0;
-
- if (get_user(p, &up->menu_info))
- return -EFAULT;
- umenus = compat_ptr(p);
-
- if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
-
return 0;
}
static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
const struct uvc_xu_control_query32 __user *up)
{
- u8 __user *udata;
- u8 __user *kdata;
compat_caddr_t p;
if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
if (__get_user(p, &up->data))
return -EFAULT;
- udata = compat_ptr(p);
- if (!access_ok(VERIFY_READ, udata, kp->size))
- return -EFAULT;
-
- kdata = compat_alloc_user_space(kp->size);
- if (kdata == NULL)
- return -EFAULT;
- kp->data = kdata;
-
- if (copy_in_user(kdata, udata, kp->size))
- return -EFAULT;
+ kp->data = compat_ptr(p);
return 0;
}
static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
struct uvc_xu_control_query32 __user *up)
{
- u8 __user *udata;
- u8 __user *kdata = kp->data;
- compat_caddr_t p;
-
if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
__copy_to_user(up, kp, offsetof(typeof(*up), data)))
return -EFAULT;
- if (kp->size == 0)
- return 0;
-
- if (get_user(p, &up->data))
- return -EFAULT;
- udata = compat_ptr(p);
- if (!access_ok(VERIFY_READ, udata, kp->size))
- return -EFAULT;
-
- if (copy_in_user(udata, kdata, kp->size))
- return -EFAULT;
-
return 0;
}
static long uvc_v4l2_compat_ioctl32(struct file *file,
unsigned int cmd, unsigned long arg)
{
+ struct uvc_fh *handle = file->private_data;
union {
struct uvc_xu_control_mapping xmap;
struct uvc_xu_control_query xqry;
} karg;
void __user *up = compat_ptr(arg);
- mm_segment_t old_fs;
long ret;
switch (cmd) {
case UVCIOC_CTRL_MAP32:
- cmd = UVCIOC_CTRL_MAP;
ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
+ if (ret)
+ return ret;
+ ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
+ if (ret)
+ return ret;
+ ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
+ if (ret)
+ return ret;
+
break;
case UVCIOC_CTRL_QUERY32:
- cmd = UVCIOC_CTRL_QUERY;
ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
+ if (ret)
+ return ret;
+ ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
+ if (ret)
+ return ret;
+ ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
+ if (ret)
+ return ret;
break;
default:
return -ENOIOCTLCMD;
}
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = video_ioctl2(file, cmd, (unsigned long)&karg);
- set_fs(old_fs);
-
- if (ret < 0)
- return ret;
-
- switch (cmd) {
- case UVCIOC_CTRL_MAP:
- ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
- break;
-
- case UVCIOC_CTRL_QUERY:
- ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
- break;
- }
-
return ret;
}
#endif
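Note: the rewrite above drops the set_fs(KERNEL_DS) round-trip through video_ioctl2(). The compat handler now builds a kernel-space argument struct, keeps embedded pointers as __user pointers via compat_ptr(), and calls the native implementation directly. A sketch of that shape, with hypothetical my_* names:

#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

struct my_arg { u32 size; void __user *data; };
struct my_arg32 { u32 size; compat_caddr_t data; };

long my_do_ioctl(struct file *file, struct my_arg *arg);	/* hypothetical */

static long my_compat_ioctl(struct file *file, void __user *up)
{
	struct my_arg32 __user *up32 = up;
	struct my_arg karg;
	compat_caddr_t p;

	if (get_user(karg.size, &up32->size) || get_user(p, &up32->data))
		return -EFAULT;
	karg.data = compat_ptr(p);	/* still a user pointer */

	return my_do_ioctl(file, &karg);
}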
* The determine_valid_ioctls() call already should ensure
* that this can never happen, but just in case...
*/
- if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_cropcap))
+ if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_g_selection))
return -ENOTTY;
if (ops->vidioc_cropcap)
/*
* Media Controller ancillary functions
*
- * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
* Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com>
* Copyright (C) 2006-2010 Nokia Corporation
* Copyright (c) 2016 Intel Corporation.
gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
- GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
+ GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
p->cycle2cyclesamecsen);
break;
case MAX77620:
fps_min_period = MAX77620_FPS_PERIOD_MIN_US;
+ break;
default:
return -EINVAL;
}
break;
case MAX77620:
fps_max_period = MAX77620_FPS_PERIOD_MAX_US;
+ break;
default:
return -EINVAL;
}
/* synchronized under device mutex */
if (waitqueue_active(&cl->wait)) {
cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
- wake_up_interruptible(&cl->wait);
+ wake_up(&cl->wait);
}
}
goto idata_err;
}
- if (!idata->buf_bytes)
+ if (!idata->buf_bytes) {
+ idata->buf = NULL;
return idata;
+ }
idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
if (!idata->buf) {
packed_cmd_hdr = packed->cmd_hdr;
memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
- packed_cmd_hdr[0] = (packed->nr_entries << 16) |
- (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+ packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
hdr_blocks = mmc_large_sector(card) ? 8 : 1;
/*
((brq->data.blocks * brq->data.blksz) >=
card->ext_csd.data_tag_unit_size);
/* Argument of CMD23 */
- packed_cmd_hdr[(i * 2)] =
+ packed_cmd_hdr[(i * 2)] = cpu_to_le32(
(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
- blk_rq_sectors(prq);
+ blk_rq_sectors(prq));
/* Argument of CMD18 or CMD25 */
- packed_cmd_hdr[((i * 2)) + 1] =
+ packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
mmc_card_blockaddr(card) ?
- blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
packed->blocks += blk_rq_sectors(prq);
i++;
}
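Note: the packed command header is consumed by the eMMC device as little-endian; storing native-endian u32 values silently corrupts it on big-endian hosts. Funnelling every store through cpu_to_le32() fixes that, and declaring the buffer __le32 lets sparse flag regressions. A minimal sketch:

#include <linux/types.h>	/* __le32; cpu_to_le32 via the byteorder headers */

static void fill_hdr(__le32 *hdr, u32 nr_entries, u32 rw, u32 ver)
{
	/* Wire format is little-endian regardless of host endianness. */
	hdr[0] = cpu_to_le32((nr_entries << 16) | (rw << 8) | ver);
}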
* switch to HS200 mode if bus width is set successfully.
*/
err = mmc_select_bus_width(card);
- if (!err) {
+ if (err >= 0) {
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
} else if (mmc_card_hs(card)) {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
- if (!err) {
+ if (err >= 0) {
err = mmc_select_hs_ddr(card);
if (err)
goto free_card;
gpio_direction_output(gpio_power,
host->pdata->gpio_power_invert);
}
- if (gpio_is_valid(gpio_ro))
+ if (gpio_is_valid(gpio_ro)) {
ret = mmc_gpio_request_ro(mmc, gpio_ro);
- if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
- goto out;
- } else {
- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n",
+ gpio_ro);
+ goto out;
+ } else {
+ mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+ 0 : MMC_CAP2_RO_ACTIVE_HIGH;
+ }
}
if (gpio_is_valid(gpio_cd))
[SDXC_CLK_400K] = { .output = 180, .sample = 180 },
[SDXC_CLK_25M] = { .output = 180, .sample = 75 },
[SDXC_CLK_50M] = { .output = 150, .sample = 120 },
- [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 },
- [SDXC_CLK_50M_DDR_8BIT] = { .output = 90, .sample = 120 },
+ [SDXC_CLK_50M_DDR] = { .output = 54, .sample = 36 },
+ [SDXC_CLK_50M_DDR_8BIT] = { .output = 72, .sample = 72 },
};
static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
MMC_CAP_1_8V_DDR |
MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
- /* TODO MMC DDR is not working on A80 */
- if (of_device_is_compatible(pdev->dev.of_node,
- "allwinner,sun9i-a80-mmc"))
- mmc->caps &= ~MMC_CAP_1_8V_DDR;
-
ret = mmc_of_parse(mmc);
if (ret)
goto error_free_dma;
/* detect availability of ELM module. Won't be present pre-OMAP4 */
info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
- if (!info->elm_of_node)
- dev_dbg(dev, "ti,elm-id not in DT\n");
+ if (!info->elm_of_node) {
+ info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
+ if (!info->elm_of_node)
+ dev_dbg(dev, "ti,elm-id not in DT\n");
+ }
/* select ecc-scheme for NAND */
if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
*/
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
- struct kstat stat;
int err, minor;
+ struct path path;
+ struct kstat stat;
/* Probably this is an MTD character device node path */
- err = vfs_stat(mtd_dev, &stat);
+ err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
+ if (err)
+ return ERR_PTR(err);
+
+ err = vfs_getattr(&path, &stat);
+ path_put(&path);
if (err)
return ERR_PTR(err);
return ERR_PTR(-EINVAL);
minor = MINOR(stat.rdev);
+
if (minor & 1)
/*
* We do not think the "/dev/mtdrX" devices need to be supported,
int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
struct ubi_volume *vol = ubi->volumes[idx];
struct ubi_vid_hdr *vid_hdr;
+ uint32_t crc;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
goto out_put;
}
- vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
- err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
- if (err) {
- up_read(&ubi->fm_eba_sem);
- goto write_error;
- }
+ ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
- data_size = offset + len;
mutex_lock(&ubi->buf_mutex);
memset(ubi->peb_buf + offset, 0xFF, len);
memcpy(ubi->peb_buf + offset, buf, len);
+ data_size = offset + len;
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->copy_flag = 1;
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->data_crc = cpu_to_be32(crc);
+ err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
+ if (err) {
+ mutex_unlock(&ubi->buf_mutex);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
if (err) {
mutex_unlock(&ubi->buf_mutex);
struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
{
int error, ubi_num, vol_id;
+ struct path path;
struct kstat stat;
dbg_gen("open volume %s, mode %d", pathname, mode);
if (!pathname || !*pathname)
return ERR_PTR(-EINVAL);
- error = vfs_stat(pathname, &stat);
+ error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+ if (error)
+ return ERR_PTR(error);
+
+ error = vfs_getattr(&path, &stat);
+ path_put(&path);
if (error)
return ERR_PTR(error);
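Note: both hunks replace vfs_stat() with an explicit kern_path() + vfs_getattr() + path_put() sequence. A standalone sketch of that sequence, assuming the two-argument vfs_getattr() of this kernel generation:

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/stat.h>

static int stat_by_path(const char *name, struct kstat *st)
{
	struct path path;
	int err;

	err = kern_path(name, LOOKUP_FOLLOW, &path);
	if (err)
		return err;

	err = vfs_getattr(&path, st);
	path_put(&path);		/* always drop the reference */
	return err;
}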
#define MAC_ADDRESS_EQUAL(A, B) \
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
-static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
+static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
+ 0, 0, 0, 0, 0, 0
+};
static u16 ad_ticks_per_sec;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
+ MULTICAST_LACPDU_ADDR;
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
}
}
+static int __agg_active_ports(struct aggregator *agg)
+{
+ struct port *port;
+ int active = 0;
+
+ for (port = agg->lag_ports; port;
+ port = port->next_port_in_aggregator) {
+ if (port->is_enabled)
+ active++;
+ }
+
+ return active;
+}
+
/**
* __get_agg_bandwidth - get the total bandwidth of an aggregator
* @aggregator: the aggregator we're looking at
*/
static u32 __get_agg_bandwidth(struct aggregator *aggregator)
{
+ int nports = __agg_active_ports(aggregator);
u32 bandwidth = 0;
- if (aggregator->num_of_ports) {
+ if (nports) {
switch (__get_link_speed(aggregator->lag_ports)) {
case AD_LINK_SPEED_1MBPS:
- bandwidth = aggregator->num_of_ports;
+ bandwidth = nports;
break;
case AD_LINK_SPEED_10MBPS:
- bandwidth = aggregator->num_of_ports * 10;
+ bandwidth = nports * 10;
break;
case AD_LINK_SPEED_100MBPS:
- bandwidth = aggregator->num_of_ports * 100;
+ bandwidth = nports * 100;
break;
case AD_LINK_SPEED_1000MBPS:
- bandwidth = aggregator->num_of_ports * 1000;
+ bandwidth = nports * 1000;
break;
case AD_LINK_SPEED_2500MBPS:
- bandwidth = aggregator->num_of_ports * 2500;
+ bandwidth = nports * 2500;
break;
case AD_LINK_SPEED_10000MBPS:
- bandwidth = aggregator->num_of_ports * 10000;
+ bandwidth = nports * 10000;
break;
case AD_LINK_SPEED_20000MBPS:
- bandwidth = aggregator->num_of_ports * 20000;
+ bandwidth = nports * 20000;
break;
case AD_LINK_SPEED_40000MBPS:
- bandwidth = aggregator->num_of_ports * 40000;
+ bandwidth = nports * 40000;
break;
case AD_LINK_SPEED_56000MBPS:
- bandwidth = aggregator->num_of_ports * 56000;
+ bandwidth = nports * 56000;
break;
case AD_LINK_SPEED_100000MBPS:
- bandwidth = aggregator->num_of_ports * 100000;
+ bandwidth = nports * 100000;
break;
default:
bandwidth = 0; /* to silence the compiler */
switch (__get_agg_selection_mode(curr->lag_ports)) {
case BOND_AD_COUNT:
- if (curr->num_of_ports > best->num_of_ports)
+ if (__agg_active_ports(curr) > __agg_active_ports(best))
return curr;
- if (curr->num_of_ports < best->num_of_ports)
+ if (__agg_active_ports(curr) < __agg_active_ports(best))
return best;
/*FALLTHROUGH*/
if (!port)
return 0;
- return netif_running(port->slave->dev) &&
- netif_carrier_ok(port->slave->dev);
+ for (port = agg->lag_ports; port;
+ port = port->next_port_in_aggregator) {
+ if (netif_running(port->slave->dev) &&
+ netif_carrier_ok(port->slave->dev))
+ return 1;
+ }
+
+ return 0;
}
/**
agg->is_active = 0;
- if (agg->num_of_ports && agg_device_up(agg))
+ if (__agg_active_ports(agg) && agg_device_up(agg))
best = ad_agg_selection_test(best, agg);
}
* answering partner.
*/
if (active && active->lag_ports &&
- active->lag_ports->is_enabled &&
+ __agg_active_ports(active) &&
(__agg_has_partner(active) ||
(!__agg_has_partner(active) &&
!__agg_has_partner(best)))) {
aggregator->is_individual = false;
aggregator->actor_admin_aggregator_key = 0;
aggregator->actor_oper_aggregator_key = 0;
- aggregator->partner_system = null_mac_addr;
+ eth_zero_addr(aggregator->partner_system.mac_addr_value);
aggregator->partner_system_priority = 0;
aggregator->partner_oper_aggregator_key = 0;
aggregator->receive_state = 0;
if (aggregator) {
ad_clear_agg(aggregator);
- aggregator->aggregator_mac_address = null_mac_addr;
+ eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
aggregator->aggregator_identifier = 0;
aggregator->slave = NULL;
}
else
temp_aggregator->lag_ports = temp_port->next_port_in_aggregator;
temp_aggregator->num_of_ports--;
- if (temp_aggregator->num_of_ports == 0) {
+ if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
*/
void bond_3ad_handle_link_change(struct slave *slave, char link)
{
+ struct aggregator *agg;
struct port *port;
+ bool dummy;
port = &(SLAVE_AD_INFO(slave)->port);
port->is_enabled = false;
ad_update_actor_keys(port, true);
}
+ agg = __get_first_agg(port);
+ ad_agg_selection_logic(agg, &dummy);
+
netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
port->actor_port_number,
link == BOND_LINK_UP ? "UP" : "DOWN");
active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
if (active) {
/* are enough slaves available to consider link up? */
- if (active->num_of_ports < bond->params.min_links) {
+ if (__agg_active_ports(active) < bond->params.min_links) {
if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
goto out;
-#ifndef __long_aligned
-#define __long_aligned __attribute__((aligned((sizeof(long)))))
-#endif
-static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
-static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
}
/* check for initial state */
+ new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
if (bond->params.updelay) {
if (err < 0)
return err;
- return register_netdevice(bond_dev);
+ err = register_netdevice(bond_dev);
+
+ netif_carrier_off(bond_dev);
+
+ return err;
}
static size_t bond_get_size(const struct net_device *bond_dev)
/* upper group completed, look again in lower */
if (priv->rx_next > get_mb_rx_low_last(priv) &&
- quota > 0 && mb > get_mb_rx_last(priv)) {
+ mb > get_mb_rx_last(priv)) {
priv->rx_next = get_mb_rx_first(priv);
- goto again;
+ if (quota > 0)
+ goto again;
}
return received;
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
- for (i = 0; i < frame->can_dlc; i += 2) {
- priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
- frame->data[i] | (frame->data[i + 1] << 8));
+ if (priv->type == BOSCH_D_CAN) {
+ u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = (u32)frame->data[i];
+ data |= (u32)frame->data[i + 1] << 8;
+ data |= (u32)frame->data[i + 2] << 16;
+ data |= (u32)frame->data[i + 3] << 24;
+ priv->write_reg32(priv, dreg, data);
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2) {
+ priv->write_reg(priv,
+ C_CAN_IFACE(DATA1_REG, iface) + i / 2,
+ frame->data[i] |
+ (frame->data[i + 1] << 8));
+ }
}
}
} else {
int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
- for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
- data = priv->read_reg(priv, dreg);
- frame->data[i] = data;
- frame->data[i + 1] = data >> 8;
+ if (priv->type == BOSCH_D_CAN) {
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = priv->read_reg32(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ frame->data[i + 2] = data >> 16;
+ frame->data[i + 3] = data >> 24;
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
+ data = priv->read_reg(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ }
}
}
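Note: D_CAN cores expose the message-object data as two 32-bit registers, while C_CAN uses four 16-bit ones; issuing 16-bit accesses on D_CAN is what the hunks above fix. The byte handling itself is plain little-endian assembly of four bytes into a word:

#include <linux/types.h>

/* Pack four frame bytes, little-endian, into one 32-bit register value. */
static u32 pack4_le(const u8 *d)
{
	return (u32)d[0] | ((u32)d[1] << 8) |
	       ((u32)d[2] << 16) | ((u32)d[3] << 24);
}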
* - control mode with CAN_CTRLMODE_FD set
*/
+ if (!data)
+ return 0;
+
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
return -EOPNOTSUPP;
}
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+ return;
+}
+
static struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
.maxtype = IFLA_CAN_MAX,
.validate = can_validate,
.newlink = can_newlink,
.changelink = can_changelink,
+ .dellink = can_dellink,
.get_size = can_get_size,
.fill_info = can_fill_info,
.get_xstats_size = can_get_xstats_size,
config CAN_GS_USB
tristate "Geschwister Schneider UG interfaces"
---help---
- This driver supports the Geschwister Schneider USB/CAN devices.
+ This driver supports the Geschwister Schneider and bytewerk.org
+ candleLight USB/CAN interfaces.
If unsure choose N,
choose Y for built-in support,
M to compile as module (module will be named: gs_usb).
- Kvaser USBcan R
- Kvaser Leaf Light v2
- Kvaser Mini PCI Express HS
+ - Kvaser Mini PCI Express 2xHS
+ - Kvaser USBcan Light 2xHS
- Kvaser USBcan II HS/HS
- Kvaser USBcan II HS/LS
- Kvaser USBcan Rugged ("USBcan Rev B")
-/* CAN driver for Geschwister Schneider USB/CAN devices.
+/* CAN driver for Geschwister Schneider USB/CAN devices
+ * and bytewerk.org candleLight USB CAN interfaces.
*
- * Copyright (C) 2013 Geschwister Schneider Technologie-,
+ * Copyright (C) 2013-2016 Geschwister Schneider Technologie-,
* Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
+ * Copyright (C) 2016 Hubert Denkmair
*
* Many thanks to all socketcan devs!
*
#define USB_GSUSB_1_VENDOR_ID 0x1d50
#define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_CANDLELIGHT_VENDOR_ID 0x1209
+#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
+
#define GSUSB_ENDPOINT_IN 1
#define GSUSB_ENDPOINT_OUT 2
static const struct usb_device_id gs_usb_table[] = {
{ USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
USB_GSUSB_1_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
+ USB_CANDLELIGHT_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
MODULE_DESCRIPTION(
"Socket CAN device driver for Geschwister Schneider Technologie-, "
-"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
+"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n"
+"and bytewerk.org candleLight USB CAN interfaces.");
MODULE_LICENSE("GPL v2");
#define USB_CAN_R_PRODUCT_ID 39
#define USB_LEAF_LITE_V2_PRODUCT_ID 288
#define USB_MINI_PCIE_HS_PRODUCT_ID 289
+#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
+#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
+#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
static inline bool kvaser_is_leaf(const struct usb_device_id *id)
{
return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_MINI_PCIE_HS_PRODUCT_ID;
+ id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID;
}
/* Kvaser USBCan-II devices */
.driver_info = KVASER_HAS_TXRX_ERRORS },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
/* USBCANII family IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
unsigned long flags;
/* If the device is closed, ignore the timeout */
- if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
+ if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
return;
/* Any nonrecoverable hardware error?
* on the current MAC's MII bus
*/
for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
- if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) {
- phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
+ if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
+ phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
if (!aup->phy_search_highest_addr)
/* break out with first one found */
break;
priv->bus = bus;
bus->priv = priv;
bus->parent = priv->dev;
- bus->name = "Synopsys MII Bus",
+ bus->name = "Synopsys MII Bus";
bus->read = &arc_mdio_read;
bus->write = &arc_mdio_write;
bus->reset = &arc_mdio_reset;
while (!cur_buf->skb && next != rxq->read_idx) {
struct alx_rfd *rfd = &rxq->rfd[cur];
- skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+ /*
+ * When the DMA RX address is set to something like
+ * 0x....fc0, it is very likely to cause a DMA
+ * RFD overflow issue.
+ *
+ * To work around it, we allocate the rx skb with 64
+ * bytes of extra space, and offset the address whenever
+ * 0x....fc0 is detected.
+ */
+ skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
if (!skb)
break;
+
+ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+ skb_reserve(skb, 64);
+
dma = dma_map_single(&alx->hw.pdev->dev,
skb->data, alx->rxbuf_size,
DMA_FROM_DEVICE);
if (err) {
netdev_err(dev, "rx buffer allocation failed\n");
dev->stats.rx_dropped++;
+ dev_kfree_skb(skb);
return;
}
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(u32 *)p;
+ data[i] = *(unsigned long *)p;
}
}
dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
DMA_TO_DEVICE);
- while (i > 0) {
+ while (i-- > 0) {
int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[index];
u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
while (ring->start != ring->end) {
int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[slot_idx];
- u32 ctl1;
+ u32 ctl0, ctl1;
int len;
if (slot_idx == empty_slot)
break;
+ ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
len = ctl1 & BGMAC_DESC_CTL1_LEN;
- if (ctl1 & BGMAC_DESC_CTL0_SOF)
+ if (ctl0 & BGMAC_DESC_CTL0_SOF)
/* Unmap no longer used buffer */
dma_unmap_single(dma_dev, slot->dma_addr, len,
DMA_TO_DEVICE);
phy_start(bgmac->phy_dev);
- netif_carrier_on(net_dev);
+ netif_start_queue(net_dev);
+
return 0;
}
return rc;
}
-int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
{
struct bnx2x_vlan_entry *vlan;
int rc = 0;
- if (!bp->vlan_cnt) {
- DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
- return 0;
- }
-
+ /* Configure all non-configured entries */
list_for_each_entry(vlan, &bp->vlan_reg, link) {
- /* Prepare for cleanup in case of errors */
- if (rc) {
- vlan->hw = false;
- continue;
- }
-
- if (!vlan->hw)
+ if (vlan->hw)
continue;
- DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+ if (bp->vlan_cnt >= bp->vlan_credit)
+ return -ENOBUFS;
rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
if (rc) {
- BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
- vlan->hw = false;
- rc = -EINVAL;
- continue;
+ BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
+ return rc;
}
+
+ DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
+ vlan->hw = true;
+ bp->vlan_cnt++;
}
- return rc;
+ return 0;
+}
+
+static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+{
+ bool need_accept_any_vlan;
+
+ need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
+
+ if (bp->accept_any_vlan != need_accept_any_vlan) {
+ bp->accept_any_vlan = need_accept_any_vlan;
+ DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
+ bp->accept_any_vlan ? "raised" : "cleared");
+ if (set_rx_mode) {
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+ }
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+
+ /* The HW forgets all entries across a reload */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+ bp->vlan_cnt = 0;
+
+ /* Don't set rx mode here. Our caller will do it. */
+ bnx2x_vlan_configure(bp, false);
+
+ return 0;
}
static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
- bool hw = false;
- int rc = 0;
-
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
if (!vlan)
return -ENOMEM;
- bp->vlan_cnt++;
- if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
- DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
- bp->accept_any_vlan = true;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- } else if (bp->vlan_cnt <= bp->vlan_credit) {
- rc = __bnx2x_vlan_configure_vid(bp, vid, true);
- hw = true;
- }
-
vlan->vid = vid;
- vlan->hw = hw;
+ vlan->hw = false;
+ list_add_tail(&vlan->link, &bp->vlan_reg);
- if (!rc) {
- list_add(&vlan->link, &bp->vlan_reg);
- } else {
- bp->vlan_cnt--;
- kfree(vlan);
- }
-
- DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
- return rc;
+ return 0;
}
static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
+ bool found = false;
int rc = 0;
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
-
DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
- if (!bp->vlan_cnt) {
- BNX2X_ERR("Unable to kill VLAN %d\n", vid);
- return -EINVAL;
- }
-
list_for_each_entry(vlan, &bp->vlan_reg, link)
- if (vlan->vid == vid)
+ if (vlan->vid == vid) {
+ found = true;
break;
+ }
- if (vlan->vid != vid) {
+ if (!found) {
BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
return -EINVAL;
}
- if (vlan->hw)
+ if (netif_running(dev) && vlan->hw) {
rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+ DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
+ bp->vlan_cnt--;
+ }
list_del(&vlan->link);
kfree(vlan);
- bp->vlan_cnt--;
-
- if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
- /* Configure all non-configured entries */
- list_for_each_entry(vlan, &bp->vlan_reg, link) {
- if (vlan->hw)
- continue;
-
- rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
- if (rc) {
- BNX2X_ERR("Unable to config VLAN %d\n",
- vlan->vid);
- continue;
- }
- DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
- vlan->vid);
- vlan->hw = true;
- }
- DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
- bp->accept_any_vlan = false;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- }
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
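Note: the bnx2x rework above centralizes the VLAN filter logic. bnx2x_vlan_configure_vid_list() programs unconfigured entries until the hardware credit runs out, and accept_any_vlan flips only when the list no longer fits. The general shape (hypothetical names; hw_add_filter() stands in for the device call):

#include <linux/list.h>
#include <linux/types.h>

struct filter { struct list_head link; bool in_hw; u16 vid; };
struct dev_state { struct list_head filters; int used, credit; };

int hw_add_filter(struct dev_state *s, struct filter *f);	/* hypothetical */

/* Returns true when the device must fall back to accept-any mode. */
static bool program_filters(struct dev_state *s)
{
	struct filter *f;

	list_for_each_entry(f, &s->filters, link) {
		if (f->in_hw)
			continue;
		if (s->used >= s->credit)
			return true;	/* out of hardware slots */
		if (hw_add_filter(s, f))
			return true;	/* programming failed */
		f->in_hw = true;
		s->used++;
	}
	return false;			/* everything fit in hardware */
}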
bp->doorbells = bnx2x_vf_doorbells(bp);
rc = bnx2x_vf_pci_alloc(bp);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
} else {
doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
if (doorbell_size > pci_resource_len(pdev, 2)) {
dev_err(&bp->pdev->dev,
"Cannot map doorbells, bar size too small, aborting\n");
rc = -ENOMEM;
- goto init_one_exit;
+ goto init_one_freemem;
}
bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
doorbell_size);
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
rc = -ENOMEM;
- goto init_one_exit;
+ goto init_one_freemem;
}
if (IS_VF(bp)) {
rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
}
/* Enable SRIOV if capability found in configuration space */
rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
rc = bnx2x_set_int_mode(bp);
if (rc) {
dev_err(&pdev->dev, "Cannot set interrupts\n");
- goto init_one_exit;
+ goto init_one_freemem;
}
BNX2X_DEV_INFO("set interrupts successfully\n");
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
- goto init_one_exit;
+ goto init_one_freemem;
}
BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
return 0;
+init_one_freemem:
+ bnx2x_free_mem_bp(bp);
+
init_one_exit:
bnx2x_disable_pcie_error_reporting(bp);
cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
txr->tx_prod = prod;
+ tx_buf->is_push = 1;
netdev_tx_sent_queue(txq, skb->len);
+ wmb(); /* Sync is_push and byte queue before pushing data */
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
push_len);
}
- tx_buf->is_push = 1;
goto tx_done;
}
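Note: the reordering above makes tx_buf->is_push and the BQL accounting globally visible before the descriptor data reaches the NIC, because the completion path may run as soon as the hardware sees the pushed work. The generic publish-then-trigger pattern (hypothetical ring type; the same ordering applies whether the trigger is a doorbell write or an inline data push):

#include <linux/io.h>

struct ring { int flag; };

static void publish_then_doorbell(struct ring *r, void __iomem *db)
{
	r->flag = 1;	/* state the completion/consumer side will read */
	wmb();		/* order the store before the MMIO trigger */
	writel(1, db);	/* doorbell: may immediately run the consumer */
}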
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
- if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
- netdev_features_t features = skb->dev->features;
+ if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vlan_proto = tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT;
+ u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD)) {
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- tpa_info->metadata &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
- }
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, dev);
- if (rxcmp1->rx_cmp_flags2 &
- cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
- netdev_features_t features = skb->dev->features;
+ if ((rxcmp1->rx_cmp_flags2 &
+ cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD))
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- meta_data &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
if (!bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
+
+ /* Both CTAG and STAG VLAN acceleration on the RX side have to be
+ * turned on or off together.
+ */
+ if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ else
+ features |= NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX;
+ }
+
return features;
}
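Note: the comment in the hunk explains why CTAG and STAG RX acceleration must move together on this hardware; ndo_fix_features adjusts the requested set so the two bits never diverge, keyed off the current device state. Reduced to its core (illustrative FEAT_* bits, not the netdev feature names):

#include <linux/bitops.h>
#include <linux/types.h>

#define FEAT_A	BIT(0)
#define FEAT_B	BIT(1)

static u64 keep_pair_in_lockstep(u64 current_f, u64 requested)
{
	const u64 pair = FEAT_A | FEAT_B;

	if ((requested & pair) != pair) {
		/* Not both set: move both the same way, based on the
		 * current state (mirrors the hunk above). */
		if (current_f & FEAT_A)
			requested &= ~pair;
		else
			requested |= pair;
	}
	return requested;
}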
{
struct bnxt *bp = netdev_priv(dev);
u16 start = eeprom->offset, length = eeprom->len;
- int rc;
+ int rc = 0;
memset(data, 0, eeprom->len);
if (!g) {
netif_info(lio, tx_err, lio->netdev,
"Transmit scatter gather: glist null!\n");
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
}
cmdsetup.s.gather = 1;
else
status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
if (status == IQ_SEND_FAILED)
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
return NETDEV_TX_OK;
+lio_xmit_dma_failed:
+ dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+ ndata.datasize, DMA_TO_DEVICE);
lio_xmit_failed:
stats->tx_dropped++;
netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
iq_no, stats->tx_dropped);
- dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
- ndata.datasize, DMA_TO_DEVICE);
recv_buffer_free(skb);
return NETDEV_TX_OK;
}
u32 rr_quantum;
u8 sq_idx = sq->sq_num;
u8 pqs_vnic;
+ int svf;
if (sq->sqs_mode)
pqs_vnic = nic->pqs_vf[vnic];
/* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
- tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ if (!sq->sqs_mode) {
+ tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ } else {
+ for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+ if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+ break;
+ }
+ tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
+ tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
+ tl4 += (svf * NIC_TL4_PER_LMAC);
+ tl4 += (bgx * NIC_TL4_PER_BGX);
+ }
tl4 += sq_idx;
- if (sq->sqs_mode)
- tl4 += vnic * 8;
tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
}
/* Clear rcvflt bit (latching high) and read it back */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
if (bgx->use_training) {
return -1;
}
- /* Wait for MAC RX to be ready */
- if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
- SMU_RX_CTL_STATUS, true)) {
- dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
- return -1;
- }
-
/* Wait for BGX RX to be idle */
if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
return -1;
}
- if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
- dev_err(&bgx->pdev->dev, "Receive fault\n");
- return -1;
- }
-
- /* Receive link is latching low. Force it high and verify it */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
- if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
- SPU_STATUS1_RCV_LNK, false)) {
- dev_err(&bgx->pdev->dev, "SPU receive link down\n");
- return -1;
- }
-
+ /* Clear receive packet disable */
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
cfg &= ~SPU_MISC_CTL_RX_DIS;
bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
- return 0;
+
+ /* Check for MAC RX faults */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+ /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+ cfg &= SMU_RX_CTL_STATUS;
+ if (!cfg)
+ return 0;
+
+ /* Rx local/remote fault seen.
+ * Reinitialize the LMAC to see if the condition recovers
+ */
+ bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+
+ return -1;
}
static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
- u64 link;
+ u64 spu_link, smu_link;
lmac = container_of(work, struct lmac, dwork.work);
bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
SPU_STATUS1_RCV_LNK, false);
- link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
- if (link & SPU_STATUS1_RCV_LNK) {
+ spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+ if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+ !(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = 40000;
}
if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up) {
+ if (bgx_xaui_check_link(lmac)) {
+ /* Errors, clear link_up state */
+ lmac->link_up = 0;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ }
+ }
lmac->last_link = lmac->link_up;
- if (lmac->link_up)
- bgx_xaui_check_link(lmac);
}
queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac;
- u64 cmrx_cfg;
+ u64 cfg;
lmac = &bgx->lmac[lmacid];
if (lmac->check_link) {
destroy_workqueue(lmac->check_link);
}
- cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
- cmrx_cfg &= ~(1 << 15);
- bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ /* Disable packet reception */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_RX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Give chance for Rx/Tx FIFO to get drained */
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+ /* Disable packet transmission */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_TX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Disable serdes lanes */
+ if (!lmac->is_sgmii)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ else
+ bgx_reg_modify(bgx, lmacid,
+ BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
bgx_flush_dmac_addrs(bgx, lmacid);
if ((bgx->lmac_type != BGX_MODE_XFI) &&
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMRX_RX_FIFO_LEN 0x108
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (x << 49)
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_FIFO_LEN 0x518
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610
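Note: the enlarged bgx_lmac_disable() above follows the usual quiesce order: stop packet reception, poll the new RX/TX FIFO length registers until they drain, stop transmission, power down the serdes, then disable the LMAC itself. As a generic shape (every helper here is hypothetical, standing in for the bgx_reg_*() accessors):

struct mac;				/* opaque device handle */

/* Hypothetical register helpers. */
void reg_clear(struct mac *m, int reg, unsigned long bits);
void poll_until_zero(struct mac *m, int reg);

static void quiesce_mac(struct mac *m, int cfg, int rx_fifo, int tx_fifo,
			unsigned long rx_en, unsigned long tx_en,
			unsigned long mac_en)
{
	reg_clear(m, cfg, rx_en);	/* stop accepting packets */
	poll_until_zero(m, rx_fifo);	/* let the RX FIFO drain */
	poll_until_zero(m, tx_fifo);	/* let the TX FIFO drain */
	reg_clear(m, cfg, tx_en);	/* stop transmitting */
	reg_clear(m, cfg, mac_en);	/* finally disable the MAC */
}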
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0E
-#define T4FW_VERSION_MICRO 0x04
+#define T4FW_VERSION_MINOR 0x0F
+#define T4FW_VERSION_MICRO 0x25
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0E
-#define T5FW_VERSION_MICRO 0x04
+#define T5FW_VERSION_MINOR 0x0F
+#define T5FW_VERSION_MICRO 0x25
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0E
-#define T6FW_VERSION_MICRO 0x04
+#define T6FW_VERSION_MINOR 0x0F
+#define T6FW_VERSION_MICRO 0x25
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
unsigned int entry;
void *dest;
+ if (skb_put_padto(skb, ETHOC_ZLEN)) {
+ dev->stats.tx_errors++;
+ goto out_no_free;
+ }
+
if (unlikely(skb->len > ETHOC_BUFSIZ)) {
dev->stats.tx_errors++;
goto out;
skb_tx_timestamp(skb);
out:
dev_kfree_skb(skb);
+out_no_free:
return NETDEV_TX_OK;
}
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
ret = -ENXIO;
- goto error;
+ goto free;
}
if (netdev->mem_end) {
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
ret = -ENXIO;
- goto error;
+ goto free;
}
} else {
/* Allocate buffer memory */
dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
buffer_size);
ret = -ENOMEM;
- goto error;
+ goto free;
}
netdev->mem_end = netdev->mem_start + buffer_size;
priv->dma_alloc = buffer_size;
128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
if (num_bd < 4) {
ret = -ENODEV;
- goto error;
+ goto free;
}
priv->num_bd = num_bd;
/* num_tx must be a power of two */
priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
if (!priv->vma) {
ret = -ENOMEM;
- goto error;
+ goto free;
}
/* Allow the platform setup code to pass in a MAC address. */
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
ret = -ENOMEM;
- goto free;
+ goto free2;
}
priv->mdio->name = "ethoc-mdio";
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free;
+ goto free2;
}
ret = ethoc_mdio_probe(netdev);
error:
mdiobus_unregister(priv->mdio);
mdiobus_free(priv->mdio);
-free:
+free2:
if (priv->clk)
clk_disable_unprepare(priv->clk);
+free:
free_netdev(netdev);
out:
return ret;
* re-adding ourselves to the poll list.
*/
- if (priv->tx_skb && !tx_ctrl_ct)
+ if (priv->tx_skb && !tx_ctrl_ct) {
+ nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
napi_reschedule(napi);
+ }
}
return work_done;
ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
usleep_range(10, 20);
+ ge_rst_value = 0;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
/* Tx fifo reset sequence */
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
bdp->cbd_bufaddr = cpu_to_fec32(0);
- if (!skb) {
- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
- continue;
- }
+ if (!skb)
+ goto skb_done;
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
-
+skb_done:
/* Make sure the update to bdp and tx_skbuff are performed
* before dirty_tx
*/
return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) {
- pr_err("Rx coalesced frames exceed hardware limiation");
+ pr_err("Rx coalesced frames exceed hardware limitation\n");
return -EINVAL;
}
if (ec->tx_max_coalesced_frames > 255) {
- pr_err("Tx coalesced frame exceed hardware limiation");
+ pr_err("Tx coalesced frame exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
tx_queue->tx_ring_size);
if (likely(!nr_frags)) {
- lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ if (likely(!do_tstamp))
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
u32 lstatus_start = lstatus;
u32 link_stat = priv->link;
struct hnae_handle *h;
- assert(priv && priv->ae_handle);
h = priv->ae_handle;
if (priv->phy) {
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
- assert(priv);
-
strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
sizeof(drvinfo->version));
drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
struct hnae_handle *h;
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
h = priv->ae_handle;
ops = h->dev->ops;
struct hnae_ae_ops *ops;
int ret;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
cmd->version = HNS_CHIP_VERSION;
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
if (!ops->get_regs_len) {
netdev_err(net_dev, "ops->get_regs_len is null!\n");
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/seq_file.h>
+#include <linux/workqueue.h>
#include "ibmvnic.h"
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
ibmvnic_send_crq(adapter, &crq);
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
+
return 0;
bounce_map_failed:
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
if (adapter->bounce_buffer) {
if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
goto reg_failed;
}
- scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
- if (scrq->irq == NO_IRQ) {
- dev_err(dev, "Error mapping irq\n");
- goto map_irq_failed;
- }
-
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
scrq->cur = 0;
return scrq;
-map_irq_failed:
- do {
- rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
- adapter->vdev->unit_address,
- scrq->crq_num);
- } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (adapter->tx_scrq[i]) {
free_irq(adapter->tx_scrq[i]->irq,
adapter->tx_scrq[i]);
+ irq_dispose_mapping(adapter->tx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->tx_scrq[i]);
}
if (adapter->rx_scrq[i]) {
free_irq(adapter->rx_scrq[i]->irq,
adapter->rx_scrq[i]);
+ irq_dispose_mapping(adapter->rx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->rx_scrq[i]);
}
adapter->requested_caps = 0;
}
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->tx_scrq) {
+ for (i = 0; i < adapter->req_tx_queues; i++)
+ if (adapter->tx_scrq[i])
+ release_sub_crq_queue(adapter,
+ adapter->tx_scrq[i]);
+ adapter->tx_scrq = NULL;
+ }
+
+ if (adapter->rx_scrq) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ if (adapter->rx_scrq[i])
+ release_sub_crq_queue(adapter,
+ adapter->rx_scrq[i]);
+ adapter->rx_scrq = NULL;
+ }
+
+ adapter->requested_caps = 0;
+}
+
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
return IRQ_HANDLED;
}
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_sub_crq_queue *scrq;
+ int i = 0, j = 0;
+ int rc = 0;
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ scrq = adapter->tx_scrq[i];
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+ if (scrq->irq == NO_IRQ) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping irq\n");
+ goto req_tx_irq_failed;
+ }
+
+ rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
+ 0, "ibmvnic_tx", scrq);
+
+ if (rc) {
+ dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
+ scrq->irq, rc);
+ irq_dispose_mapping(scrq->irq);
+ goto req_rx_irq_failed;
+ }
+ }
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ scrq = adapter->rx_scrq[i];
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+ if (scrq->irq == NO_IRQ) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping irq\n");
+ goto req_rx_irq_failed;
+ }
+ rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
+ 0, "ibmvnic_rx", scrq);
+ if (rc) {
+ dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
+ scrq->irq, rc);
+ irq_dispose_mapping(scrq->irq);
+ goto req_rx_irq_failed;
+ }
+ }
+ return rc;
+
+req_rx_irq_failed:
+ for (j = 0; j < i; j++)
+ free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
+ irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ i = adapter->req_tx_queues;
+req_tx_irq_failed:
+ for (j = 0; j < i; j++)
+ free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+ irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ release_sub_crqs_no_irqs(adapter);
+ return rc;
+}
+
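Note: splitting IRQ setup into init_sub_crq_irqs() lets capability renegotiation tear the sub-CRQs down with release_sub_crqs_no_irqs() without freeing IRQs that were never requested. The per-queue request/unwind skeleton (hypothetical names):

#include <linux/interrupt.h>

struct q { unsigned int irq; };

static irqreturn_t my_handler(int irq, void *dev)	/* hypothetical */
{
	return IRQ_HANDLED;
}

static int request_all(struct q *qs, int n)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = request_irq(qs[i].irq, my_handler, 0, "my_q", &qs[i]);
		if (rc)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)	/* free only what was actually requested */
		free_irq(qs[i].irq, &qs[i]);
	return rc;
}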
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
int total_queues;
int more = 0;
- int i, j;
- int rc;
+ int i;
if (!retry) {
/* Sub-CRQ entries are 32 byte long */
for (i = 0; i < adapter->req_tx_queues; i++) {
adapter->tx_scrq[i] = allqueues[i];
adapter->tx_scrq[i]->pool_index = i;
- rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
- 0, "ibmvnic_tx", adapter->tx_scrq[i]);
- if (rc) {
- dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
- adapter->tx_scrq[i]->irq, rc);
- goto req_tx_irq_failed;
- }
}
adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
for (i = 0; i < adapter->req_rx_queues; i++) {
adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
adapter->rx_scrq[i]->scrq_num = i;
- rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
- 0, "ibmvnic_rx", adapter->rx_scrq[i]);
- if (rc) {
- dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
- adapter->rx_scrq[i]->irq, rc);
- goto req_rx_irq_failed;
- }
}
memset(&crq, 0, sizeof(crq));
return;
-req_rx_irq_failed:
- for (j = 0; j < i; j++)
- free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
- i = adapter->req_tx_queues;
-req_tx_irq_failed:
- for (j = 0; j < i; j++)
- free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
- kfree(adapter->rx_scrq);
- adapter->rx_scrq = NULL;
rx_failed:
kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff;
+ struct ibmvnic_error_buff *error_buff, *tmp;
unsigned long flags;
bool found = false;
int i;
}
spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_for_each_entry(error_buff, &adapter->errors, list)
+ list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
if (error_buff->error_id == crq->request_error_rsp.error_id) {
found = true;
list_del(&error_buff->list);
*req_value,
(long int)be32_to_cpu(crq->request_capability_rsp.
number), name);
- release_sub_crqs(adapter);
+ release_sub_crqs_no_irqs(adapter);
*req_value = be32_to_cpu(crq->request_capability_rsp.number);
- complete(&adapter->init_done);
+ init_sub_crqs(adapter, 1);
return;
default:
dev_err(dev, "Error %d in request cap rsp\n",
out:
if (atomic_read(&adapter->running_cap_queries) == 0)
- complete(&adapter->init_done);
+ init_sub_crqs(adapter, 0);
/* We're done querying the capabilities, initialize sub-crqs */
}
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
- struct ibmvnic_inflight_cmd *inflight_cmd;
+ struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff;
+ struct ibmvnic_error_buff *error_buff, *tmp2;
unsigned long flags;
unsigned long flags2;
spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
+ list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
switch (inflight_cmd->crq.generic.cmd) {
case LOGIN:
dma_unmap_single(dev, adapter->login_buf_token,
break;
case REQUEST_ERROR_INFO:
spin_lock_irqsave(&adapter->error_list_lock, flags2);
- list_for_each_entry(error_buff, &adapter->errors,
- list) {
+ list_for_each_entry_safe(error_buff, tmp2,
+ &adapter->errors, list) {
dma_unmap_single(dev, error_buff->dma,
error_buff->len,
DMA_FROM_DEVICE);
dev_info(dev, "Partner initialized\n");
/* Send back a response */
rc = ibmvnic_send_crq_init_complete(adapter);
- if (rc == 0)
- send_version_xchg(adapter);
+ if (!rc)
+ schedule_work(&adapter->vnic_crq_init);
else
dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
break;
.release = single_release,
};
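+/* Worker scheduled from the "Partner initialized" CRQ message: redo the
+ * version exchange and capability negotiation, set up the sub-CRQ IRQs
+ * and register the net device outside of interrupt context.
+ */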
+static void handle_crq_init_rsp(struct work_struct *work)
+{
+ struct ibmvnic_adapter *adapter = container_of(work,
+ struct ibmvnic_adapter,
+ vnic_crq_init);
+ struct device *dev = &adapter->vdev->dev;
+ struct net_device *netdev = adapter->netdev;
+ unsigned long timeout = msecs_to_jiffies(30000);
+ int rc;
+
+ send_version_xchg(adapter);
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ dev_err(dev, "Passive init timeout\n");
+ goto task_failed;
+ }
+
+ do {
+ if (adapter->renegotiate) {
+ adapter->renegotiate = false;
+ release_sub_crqs_no_irqs(adapter);
+ send_cap_queries(adapter);
+
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ dev_err(dev, "Passive init timeout\n");
+ goto task_failed;
+ }
+ }
+ } while (adapter->renegotiate);
+ rc = init_sub_crq_irqs(adapter);
+
+ if (rc)
+ goto task_failed;
+
+ netdev->real_num_tx_queues = adapter->req_tx_queues;
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(dev,
+ "failed to register netdev rc=%d\n", rc);
+ goto register_failed;
+ }
+ dev_info(dev, "ibmvnic registered\n");
+
+ return;
+
+register_failed:
+ release_sub_crqs(adapter);
+task_failed:
+ dev_err(dev, "Passive initialization was not successful\n");
+}
+
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
+ unsigned long timeout = msecs_to_jiffies(30000);
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
+ INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+
spin_lock_init(&adapter->stats_lock);
rc = ibmvnic_init_crq_queue(adapter);
ibmvnic_send_crq_init(adapter);
init_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
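+ /* If the partner does not respond in time, probe still succeeds;
+ * registration completes later from handle_crq_init_rsp() once the
+ * partner sends its init message.
+ */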
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout))
+ return 0;
do {
- adapter->renegotiate = false;
-
- init_sub_crqs(adapter, 0);
- reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
-
if (adapter->renegotiate) {
- release_sub_crqs(adapter);
+ adapter->renegotiate = false;
+ release_sub_crqs_no_irqs(adapter);
send_cap_queries(adapter);
reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout))
+ return 0;
}
} while (adapter->renegotiate);
- /* if init_sub_crqs is partially successful, retry */
- while (!adapter->tx_scrq || !adapter->rx_scrq) {
- init_sub_crqs(adapter, 1);
-
- reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
+ goto free_debugfs;
}
netdev->real_num_tx_queues = adapter->req_tx_queues;
rc = register_netdev(netdev);
if (rc) {
dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
- goto free_debugfs;
+ goto free_sub_crqs;
}
dev_info(&dev->dev, "ibmvnic registered\n");
return 0;
+free_sub_crqs:
+ release_sub_crqs(adapter);
free_debugfs:
if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
debugfs_remove_recursive(adapter->debugfs_dir);
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
u8 map_id;
+
+ struct work_struct vnic_crq_init;
};
}
/**
- * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
* @adapter: board private structure to initialize
**/
static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_RXFCS;
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable, make sure the Tx flag is always in the same state as Rx.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
return features;
}
if (!vsi || !macaddr)
return NULL;
+ /* Do not allow a broadcast filter to be added, since a broadcast
+ * filter is already added as part of add-VSI for any newly created
+ * VSI, except the FDIR VSI
+ */
+ if (is_broadcast_ether_addr(macaddr))
+ return NULL;
+
f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
aq_ret, pf->hw.aq.asq_last_status);
}
}
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret) {
- retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
- }
}
out:
/* if something went wrong then set the changed flag so we try again */
* i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
+ * @cpu: cpu to be set in the affinity mask
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
{
struct i40e_q_vector *q_vector;
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+
if (vsi->netdev)
netif_napi_add(vsi->netdev, &q_vector->napi,
i40e_napi_poll, NAPI_POLL_WEIGHT);
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int v_idx, num_q_vectors;
- int err;
+ int err, v_idx, num_q_vectors, current_cpu;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
else
return -EINVAL;
+ current_cpu = cpumask_first(cpu_online_mask);
+
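+ /* Spread the vectors across the online CPUs round-robin, wrapping
+ * back to the first CPU once the mask is exhausted.
+ */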
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
if (err)
goto err_out;
+ current_cpu = cpumask_next(current_cpu, cpu_online_mask);
+ if (unlikely(current_cpu >= nr_cpu_ids))
+ current_cpu = cpumask_first(cpu_online_mask);
}
return 0;
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
int ret = -ENODEV;
+ i40e_status aq_ret = 0;
u8 laa_macaddr[ETH_ALEN];
bool found_laa_mac_filter = false;
struct i40e_pf *pf = vsi->back;
vsi->seid = ctxt.seid;
vsi->id = ctxt.vsi_number;
}
+ /* Except for the FDIR VSI, set the broadcast filter on all other VSIs */
+ if (vsi->type != I40E_VSI_FDIR) {
+ aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+ if (aq_ret) {
+ ret = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "set brdcast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
spin_lock_bh(&vsi->mac_filter_list_lock);
/* If macvlan filters already exist, force them to get loaded */
union i40e_rx_desc *rx_desc)
{
struct i40e_rx_ptype_decoded decoded;
- bool ipv4, ipv6, tunnel = false;
u32 rx_error, rx_status;
+ bool ipv4, ipv6;
u8 ptype;
u64 qword;
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* The hardware supported by this driver does not validate outer
- * checksums for tunneled VXLAN or GENEVE frames. I don't agree
- * with it but the specification states that you "MAY validate", it
- * doesn't make it a hard requirement so if we have validated the
- * inner checksum report CHECKSUM_UNNECESSARY.
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
*/
- if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
- I40E_RX_PTYPE_INNER_PROT_UDP |
- I40E_RX_PTYPE_INNER_PROT_SCTP))
- tunnel = true;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = tunnel ? 1 : 0;
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* fall through */
+ default:
+ break;
+ }
return;
union i40e_rx_desc *rx_desc)
{
struct i40e_rx_ptype_decoded decoded;
- bool ipv4, ipv6, tunnel = false;
u32 rx_error, rx_status;
+ bool ipv4, ipv6;
u8 ptype;
u64 qword;
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* The hardware supported by this driver does not validate outer
- * checksums for tunneled VXLAN or GENEVE frames. I don't agree
- * with it but the specification states that you "MAY validate", it
- * doesn't make it a hard requirement so if we have validated the
- * inner checksum report CHECKSUM_UNNECESSARY.
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
*/
- if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
- I40E_RX_PTYPE_INNER_PROT_UDP |
- I40E_RX_PTYPE_INNER_PROT_SCTP))
- tunnel = true;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = tunnel ? 1 : 0;
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* fall through */
+ default:
+ break;
+ }
return;
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
- return 0;
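+ /* Return strictly less than budget so the NAPI core sees the poll
+ * as complete even when exactly 'budget' packets were processed.
+ */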
+ return min(work_done, budget - 1);
}
/**
static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_MBX;
if (!mbx->ops.read)
goto out;
static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_MBX;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)
/* Various constants */
/* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS 1
+#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
return 0;
err_free_irq:
+ unregister_cpu_notifier(&pp->cpu_notifier);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
mvneta_cleanup_txqs(pp);
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
hwbm_pool->construct = mvneta_bm_construct;
hwbm_pool->priv = new_pool;
+ spin_lock_init(&hwbm_pool->lock);
/* Create new pool */
err = mvneta_bm_pool_create(priv, new_pool);
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
+ u16 lcl_adv = 0, rmt_adv = 0;
+ u8 flowctrl;
u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
if (mac->phy_dev->link)
mcr |= MAC_MCR_FORCE_LINK;
- if (mac->phy_dev->duplex)
+ if (mac->phy_dev->duplex) {
mcr |= MAC_MCR_FORCE_DPX;
- if (mac->phy_dev->pause)
- mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
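+ /* Pause frames are only meaningful in full duplex: resolve TX/RX
+ * flow control from the local and link-partner pause advertisements.
+ */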
+ if (mac->phy_dev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (mac->phy_dev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ if (mac->phy_dev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+ if (flowctrl & FLOW_CTRL_TX)
+ mcr |= MAC_MCR_FORCE_TX_FC;
+ if (flowctrl & FLOW_CTRL_RX)
+ mcr |= MAC_MCR_FORCE_RX_FC;
+
+ netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+ flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
+ flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+ }
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
u32 val, ge_mode;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
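+ /* Without a phy-handle, fall back to a DT fixed-link description
+ * and use the MAC node itself as the PHY node.
+ */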
+ if (!np && of_phy_is_fixed_link(mac->of_node))
+ if (!of_phy_register_fixed_link(mac->of_node))
+ np = of_node_get(mac->of_node);
if (!np)
return -ENODEV;
switch (of_get_phy_mode(np)) {
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
ge_mode = 0;
break;
mac->phy_dev->autoneg = AUTONEG_ENABLE;
mac->phy_dev->speed = 0;
mac->phy_dev->duplex = 0;
- mac->phy_dev->supported &= PHY_BASIC_FEATURES;
+ mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
mac->phy_dev->advertising = mac->phy_dev->supported |
ADVERTISED_Autoneg;
phy_start_aneg(mac->phy_dev);
return 0;
err_free_bus:
- kfree(eth->mii_bus);
+ mdiobus_free(eth->mii_bus);
err_put_node:
of_node_put(mii_np);
mdiobus_unregister(eth->mii_bus);
of_node_put(eth->mii_bus->dev.of_node);
- kfree(eth->mii_bus);
+ mdiobus_free(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
- dma_addr_t phy_ring_head, phy_ring_tail;
+ dma_addr_t phy_ring_tail;
int cnt = MTK_DMA_SIZE;
dma_addr_t dma_addr;
int i;
eth->scratch_ring = dma_alloc_coherent(eth->dev,
cnt * sizeof(struct mtk_tx_dma),
- &phy_ring_head,
+ ð->phy_scratch_ring,
GFP_ATOMIC | __GFP_ZERO);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
GFP_KERNEL);
+ if (unlikely(!eth->scratch_head))
+ return -ENOMEM;
+
dma_addr = dma_map_single(eth->dev,
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
DMA_FROM_DEVICE);
return -ENOMEM;
memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
- phy_ring_tail = phy_ring_head +
+ phy_ring_tail = eth->phy_scratch_ring +
(sizeof(struct mtk_tx_dma) * (cnt - 1));
for (i = 0; i < cnt; i++) {
eth->scratch_ring[i].txd1 =
(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
if (i < cnt - 1)
- eth->scratch_ring[i].txd2 = (phy_ring_head +
+ eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
((i + 1) * sizeof(struct mtk_tx_dma)));
eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
}
- mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+ mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */
mtk_tx_unmap(&dev->dev, tx_buf);
return nfrags;
}
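+/* The DMA rings are shared between the MACs, so TX queues are stopped
+ * and woken globally; report whether any netdev currently has a stopped
+ * TX queue.
+ */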
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ if (netif_queue_stopped(eth->netdev[i]))
+ return 1;
+ }
+
+ return 0;
+}
+
static void mtk_wake_queue(struct mtk_eth *eth)
{
int i;
if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
goto drop;
- if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
+ if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
mtk_stop_queue(eth);
- if (unlikely(atomic_read(&ring->free_count) >
- ring->thresh))
- mtk_wake_queue(eth);
- }
+
spin_unlock_irqrestore(ð->page_lock, flags);
return NETDEV_TX_OK;
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
goto release_desc;
}
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
put_page(virt_to_head_page(new_data));
+ netdev->stats.rx_dropped++;
goto release_desc;
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
}
mtk_tx_unmap(eth->dev, tx_buf);
- ring->last_free->txd2 = next_cpu;
ring->last_free = desc;
atomic_inc(&ring->free_count);
if (!total)
return 0;
- if (atomic_read(&ring->free_count) > ring->thresh)
+ if (mtk_queue_stopped(eth) &&
+ (atomic_read(&ring->free_count) > ring->thresh))
mtk_wake_queue(eth);
return total;
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = &ring->dma[0];
- ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
- ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
- MAX_SKB_FRAGS);
+ ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+ ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we
* continue
for (i = 0; i < MTK_MAC_COUNT; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
+ if (eth->scratch_ring) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+ eth->scratch_ring,
+ eth->phy_scratch_ring);
+ eth->scratch_ring = NULL;
+ eth->phy_scratch_ring = 0;
+ }
mtk_tx_clean(eth);
mtk_rx_clean(eth);
kfree(eth->scratch_head);
mtk_w32(eth,
MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
- MTK_RX_BT_32DWORDS,
+ MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
MTK_QDMA_GLO_CFG);
return 0;
/* disable delay and normal interrupt */
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ mtk_irq_disable(eth, ~0);
mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
mtk_w32(eth, 0, MTK_RST_GL);
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
- eth->netdev[id]->watchdog_timeo = HZ;
+ eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
#define MTK_QDMA_GLO_CFG 0x1A04
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
+#define MTK_NDP_CO_PRO BIT(10)
#define MTK_TX_WB_DDONE BIT(6)
#define MTK_DMA_SIZE_16DWORDS (2 << 4)
#define MTK_RX_DMA_BUSY BIT(3)
* @rx_ring: Pointer to the memory holding info about the RX ring
* @rx_napi: The NAPI struct
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
* @clk_ethif: The ethif clock
* @clk_esw: The switch clock
struct mtk_rx_ring rx_ring;
struct napi_struct rx_napi;
struct mtk_tx_dma *scratch_ring;
+ dma_addr_t phy_scratch_ring;
void *scratch_head;
struct clk *clk_ethif;
struct clk *clk_esw;
priv->cmd.free_head = 0;
sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
- spin_lock_init(&priv->cmd.context_lock);
for (priv->cmd.token_mask = 1;
priv->cmd.token_mask < priv->cmd.max_cmds;
for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- data[index++] = ((unsigned long *)&priv->stats)[i];
+ data[index++] = ((unsigned long *)&dev->stats)[i];
for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
u32 rx_size, tx_size;
int port_up = 0;
int err = 0;
tx_size == priv->tx_ring[0]->size)
return 0;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.tx_ring_size = tx_size;
+ new_prof.rx_ring_size = rx_size;
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
- priv->prof->tx_ring_size = tx_size;
- priv->prof->rx_ring_size = rx_size;
+ mlx4_en_safe_replace_resources(priv, tmp);
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
}
err = mlx4_en_moderation_update(priv);
-
out:
+ kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
!channel->tx_count || !channel->rx_count)
return -EINVAL;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.num_tx_rings_p_up = channel->tx_count;
+ new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.rx_ring_num = channel->rx_count;
+
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
- priv->num_tx_rings_p_up = channel->tx_count;
- priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
- priv->rx_ring_num = channel->rx_count;
-
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
+ mlx4_en_safe_replace_resources(priv, tmp);
netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
}
err = mlx4_en_moderation_update(priv);
-
out:
+ kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}
mutex_lock(&mdev->state_lock);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
- if (err)
+ if (err) {
en_err(priv, "Failed configuring VLAN filter\n");
+ goto out;
+ }
}
- if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
- en_dbg(HW, priv, "failed adding vlan %d\n", vid);
- mutex_unlock(&mdev->state_lock);
+ err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
+ if (err)
+ en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
- return 0;
+out:
+ mutex_unlock(&mdev->state_lock);
+ return err;
}
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- int err;
+ int err = 0;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
}
mutex_unlock(&mdev->state_lock);
- return 0;
+ return err;
}
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
}
-static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
spin_lock_bh(&priv->stats_lock);
- memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+ netdev_stats_to_stats64(stats, &dev->stats);
spin_unlock_bh(&priv->stats_lock);
- return &priv->ret_stats;
+ return stats;
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
en_dbg(HW, priv, "Failed dumping statistics\n");
- memset(&priv->stats, 0, sizeof(priv->stats));
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
priv->tx_ring[i]->bytes = 0;
priv->tx_ring[i]->packets = 0;
priv->tx_ring[i]->tx_csum = 0;
+ priv->tx_ring[i]->tx_dropped = 0;
+ priv->tx_ring[i]->queue_stopped = 0;
+ priv->tx_ring[i]->wake_queue = 0;
+ priv->tx_ring[i]->tso_packets = 0;
+ priv->tx_ring[i]->xmit_more = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i]->bytes = 0;
return 0;
}
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i;
}
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i;
return -ENOMEM;
}
+static void mlx4_en_shutdown(struct net_device *dev)
+{
+ rtnl_lock();
+ netif_device_detach(dev);
+ mlx4_en_close(dev);
+ rtnl_unlock();
+}
+
+static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
+ struct mlx4_en_priv *src,
+ struct mlx4_en_port_profile *prof)
+{
+ memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
+ sizeof(dst->hwtstamp_config));
+ dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
+ dst->tx_ring_num = prof->tx_ring_num;
+ dst->rx_ring_num = prof->rx_ring_num;
+ dst->flags = prof->flags;
+ dst->mdev = src->mdev;
+ dst->port = src->port;
+ dst->dev = src->dev;
+ dst->prof = prof;
+ dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+
+ dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
+ GFP_KERNEL);
+ if (!dst->tx_ring)
+ return -ENOMEM;
+
+ dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
+ GFP_KERNEL);
+ if (!dst->tx_cq) {
+ kfree(dst->tx_ring);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
+ struct mlx4_en_priv *src)
+{
+ memcpy(dst->rx_ring, src->rx_ring,
+ sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
+ memcpy(dst->rx_cq, src->rx_cq,
+ sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
+ memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
+ sizeof(dst->hwtstamp_config));
+ dst->tx_ring_num = src->tx_ring_num;
+ dst->rx_ring_num = src->rx_ring_num;
+ dst->tx_ring = src->tx_ring;
+ dst->tx_cq = src->tx_cq;
+ memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
+}
+
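+/* Allocate the new rings and CQs into a scratch priv first; callers only
+ * stop the port and swap the resources in via
+ * mlx4_en_safe_replace_resources() after allocation succeeds, so a failed
+ * reconfiguration leaves the current setup untouched.
+ */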
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
+ struct mlx4_en_port_profile *prof)
+{
+ mlx4_en_copy_priv(tmp, priv, prof);
+
+ if (mlx4_en_alloc_resources(tmp)) {
+ en_warn(priv,
+ "%s: Resource allocation failed, using previous configuration\n",
+ __func__);
+ kfree(tmp->tx_ring);
+ kfree(tmp->tx_cq);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp)
+{
+ mlx4_en_free_resources(priv);
+ mlx4_en_update_priv(priv, tmp);
+}
void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ bool shutdown = mdev->dev->persist->interface_state &
+ MLX4_INTERFACE_STATE_SHUTDOWN;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
if (priv->registered) {
devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
priv->port));
- unregister_netdev(dev);
+ if (shutdown)
+ mlx4_en_shutdown(dev);
+ else
+ unregister_netdev(dev);
}
if (priv->allocated)
mdev->upper[priv->port] = NULL;
mutex_unlock(&mdev->state_lock);
+#ifdef CONFIG_RFS_ACCEL
+ mlx4_en_cleanup_filters(priv);
+#endif
+
mlx4_en_free_resources(priv);
kfree(priv->tx_ring);
kfree(priv->tx_cq);
- free_netdev(dev);
+ if (!shutdown)
+ free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
* strip that feature if this is an IPv6 encapsulated frame.
*/
if (skb->encapsulation &&
- (skb->ip_summed == CHECKSUM_PARTIAL) &&
- (ip_hdr(skb)->version != 4))
- features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ (skb->ip_summed == CHECKSUM_PARTIAL)) {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (!priv->vxlan_port ||
+ (ip_hdr(skb)->version != 4) ||
+ (udp_hdr(skb)->dest != priv->vxlan_port))
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
return features;
}
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
- .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
- .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
return -EINVAL;
}
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
- ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+ ts_config.rx_filter,
+ !!(features & NETIF_F_HW_VLAN_CTAG_RX));
- priv->hwtstamp_config.tx_type = ts_config.tx_type;
- priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+ mlx4_en_safe_replace_resources(priv, tmp);
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
out:
mutex_unlock(&mdev->state_lock);
- netdev_features_change(dev);
+ kfree(tmp);
+ if (!err)
+ netdev_features_change(dev);
return err;
}
struct mlx4_counter tmp_counter_stats;
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct mlx4_en_stat_out_flow_control_mbox *flowstats;
- struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device *dev = mdev->pndev[port];
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
+ stats->tx_dropped = 0;
priv->port_stats.tx_chksum_offload = 0;
priv->port_stats.queue_stopped = 0;
priv->port_stats.wake_queue = 0;
stats->tx_packets += ring->packets;
stats->tx_bytes += ring->bytes;
+ stats->tx_dropped += ring->tx_dropped;
priv->port_stats.tx_chksum_offload += ring->tx_csum;
priv->port_stats.queue_stopped += ring->queue_stopped;
priv->port_stats.wake_queue += ring->wake_queue;
stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
&mlx4_en_stats->MCAST_prio_1,
NUM_PRIORITIES);
- stats->collisions = 0;
stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
sw_rx_dropped;
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
- stats->rx_over_errors = 0;
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
- stats->rx_frame_errors = 0;
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
- stats->rx_missed_errors = 0;
- stats->tx_aborted_errors = 0;
- stats->tx_carrier_errors = 0;
- stats->tx_fifo_errors = 0;
- stats->tx_heartbeat_errors = 0;
- stats->tx_window_errors = 0;
- stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
+ stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
/* RX stats */
priv->pkstats.rx_multicast_packets = stats->multicast;
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
-#ifdef CONFIG_RFS_ACCEL
- mlx4_en_cleanup_filters(priv);
-#endif
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
bool inline_ok;
u32 ring_cons;
- if (!priv->port_up)
- goto tx_drop;
-
tx_ind = skb_get_queue_mapping(skb);
ring = priv->tx_ring[tx_ind];
+ if (!priv->port_up)
+ goto tx_drop;
+
/* fetch ring->cons far ahead before needing it to avoid stall */
ring_cons = ACCESS_ONCE(ring->cons);
tx_drop:
dev_kfree_skb_any(skb);
- priv->stats.tx_dropped++;
+ ring->tx_dropped++;
return NETDEV_TX_OK;
}
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
+ spin_lock_init(&priv->cmd.context_lock);
INIT_LIST_HEAD(&priv->bf_list);
mutex_init(&priv->bf_mutex);
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
- if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
+ /* Notify mlx4 clients that the kernel is being shut down */
+ persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
mlx4_unload_one(pdev);
+ }
mutex_unlock(&persist->interface_state_mutex);
}
unsigned long tx_csum;
unsigned long tso_packets;
unsigned long xmit_more;
+ unsigned int tx_dropped;
struct mlx4_bf bf;
unsigned long queue_stopped;
u32 rx_ring_num;
u32 tx_ring_size;
u32 rx_ring_size;
+ u8 num_tx_rings_p_up;
u8 rx_pause;
u8 rx_ppp;
u8 tx_pause;
u8 tx_ppp;
int rss_rings;
int inline_thold;
+ struct hwtstamp_config hwtstamp_config;
};
struct mlx4_en_profile {
struct mlx4_en_port_profile *prof;
struct net_device *dev;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct net_device_stats stats;
- struct net_device_stats ret_stats;
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause);
-void mlx4_en_free_resources(struct mlx4_en_priv *priv);
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
+ struct mlx4_en_port_profile *prof);
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
int entries, int ring, enum cq_type mode, int node);
case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
case MLX5_CMD_OP_SQERR2RTS_QP:
- case MLX5_CMD_OP_2ERR_QP:
- case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_QUERY_QP:
case MLX5_CMD_OP_SQD_RTS_QP:
case MLX5_CMD_OP_INIT2INIT_QP:
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
- case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
case MLX5_CMD_OP_SET_ROCE_ADDRESS:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
case MLX5_CMD_OP_CREATE_RQT:
case MLX5_CMD_OP_MODIFY_RQT:
case MLX5_CMD_OP_QUERY_RQT:
+
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
- case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
default: return "unknown command opcode";
}
}
pr_debug("\n");
}
+static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+{
+ struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
+
+ return be16_to_cpu(hdr->opcode);
+}
+
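+/* Delayed work armed for commands issued with a callback: if the firmware
+ * has not completed the command within MLX5_CMD_TIMEOUT_MSEC, force a
+ * completion with -ETIMEDOUT so the callback is not lost.
+ */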
+static void cb_timeout_handler(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct mlx5_cmd_work_ent *ent = container_of(dwork,
+ struct mlx5_cmd_work_ent,
+ cb_timeout_work);
+ struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
+ cmd);
+
+ ent->ret = -ETIMEDOUT;
+ mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+ mlx5_command_str(msg_to_opcode(ent->in)),
+ msg_to_opcode(ent->in));
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+}
+
static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
struct mlx5_cmd *cmd = ent->cmd;
struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
unsigned long flags;
dump_command(dev, ent, 1);
ent->ts1 = ktime_get_ns();
+ if (ent->callback)
+ schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
}
}
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
- struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
- return be16_to_cpu(hdr->opcode);
-}
-
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
if (cmd->mode == CMD_MODE_POLLING) {
wait_for_completion(&ent->done);
- err = ent->ret;
- } else {
- if (!wait_for_completion_timeout(&ent->done, timeout))
- err = -ETIMEDOUT;
- else
- err = 0;
+ } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
+ ent->ret = -ETIMEDOUT;
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}
+
+ err = ent->ret;
+
if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)),
if (!callback)
init_completion(&ent->done);
+ INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
INIT_WORK(&ent->work, cmd_work_handler);
if (page_queue) {
cmd_work_handler(&ent->work);
goto out_free;
}
- if (!callback) {
- err = wait_func(dev, ent);
- if (err == -ETIMEDOUT)
- goto out;
-
- ds = ent->ts2 - ent->ts1;
- op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
- if (op < ARRAY_SIZE(cmd->stats)) {
- stats = &cmd->stats[op];
- spin_lock_irq(&stats->lock);
- stats->sum += ds;
- ++stats->n;
- spin_unlock_irq(&stats->lock);
- }
- mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
- "fw exec time for %s is %lld nsec\n",
- mlx5_command_str(op), ds);
- *status = ent->status;
- free_cmd(ent);
- }
+ if (callback)
+ goto out;
- return err;
+ err = wait_func(dev, ent);
+ if (err == -ETIMEDOUT)
+ goto out_free;
+
+ ds = ent->ts2 - ent->ts1;
+ op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+ if (op < ARRAY_SIZE(cmd->stats)) {
+ stats = &cmd->stats[op];
+ spin_lock_irq(&stats->lock);
+ stats->sum += ds;
+ ++stats->n;
+ spin_unlock_irq(&stats->lock);
+ }
+ mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+ "fw exec time for %s is %lld nsec\n",
+ mlx5_command_str(op), ds);
+ *status = ent->status;
out_free:
free_cmd(ent);
return err;
}
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
struct mlx5_cmd *cmd = &dev->cmd;
int i;
for (i = 0; i < cmd->max_reg_cmds; i++)
down(&cmd->sem);
-
down(&cmd->pages_sem);
- flush_workqueue(cmd->wq);
-
- cmd->mode = CMD_MODE_EVENTS;
+ cmd->mode = mode;
up(&cmd->pages_sem);
for (i = 0; i < cmd->max_reg_cmds; i++)
up(&cmd->sem);
}
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd *cmd = &dev->cmd;
- int i;
-
- for (i = 0; i < cmd->max_reg_cmds; i++)
- down(&cmd->sem);
-
- down(&cmd->pages_sem);
-
- flush_workqueue(cmd->wq);
- cmd->mode = CMD_MODE_POLLING;
+ mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
+}
- up(&cmd->pages_sem);
- for (i = 0; i < cmd->max_reg_cmds; i++)
- up(&cmd->sem);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+ mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
struct semaphore *sem;
ent = cmd->ent_arr[i];
+ if (ent->callback)
+ cancel_delayed_work(&ent->cb_timeout_work);
if (ent->page_queue)
sem = &cmd->pages_sem;
else
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
#endif
struct mlx5e_params {
enum {
MLX5E_RQ_STATE_POST_WQES_ENABLE,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
+ MLX5E_RQ_STATE_FLUSH_TIMEOUT,
};
struct mlx5e_cq {
typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
u16 ix);
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+
struct mlx5e_dma_info {
struct page *page;
dma_addr_t addr;
struct mlx5e_cq cq;
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_alloc_wqe alloc_wqe;
+ mlx5e_fp_dealloc_wqe dealloc_wqe;
unsigned long state;
int ix;
enum {
MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
MLX5E_SQ_STATE_BF_ENABLE,
+ MLX5E_SQ_STATE_TX_TIMEOUT,
};
struct mlx5e_ico_wqe_info {
};
enum {
- MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+ MLX5E_STATE_ASYNC_EVENTS_ENABLED,
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
};
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
+ struct work_struct tx_timeout_work;
struct delayed_work update_stats_work;
struct mlx5_core_dev *mdev;
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
- tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
+ tc_tx_bw[i] = ets->tc_tx_bw[i];
break;
}
}
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+ if (!ets->tc_tx_bw[i])
+ return -EINVAL;
+
bw_sum += ets->tc_tx_bw[i];
+ }
}
if (bw_sum != 0 && bw_sum != 100)
#define MLX5E_NUM_SQ_STATS(priv) \
(NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
test_bit(MLX5E_STATE_OPENED, &priv->state))
-#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv))
+#define MLX5E_NUM_PFC_COUNTERS(priv) \
+ (hweight8(mlx5e_query_pfc_combined(priv)) * \
+ NUM_PPORT_PER_PRIO_PFC_COUNTERS)
static int mlx5e_get_sset_count(struct net_device *dev, int sset)
{
/* SW counters */
for (i = 0; i < NUM_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name);
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
/* Q counters */
for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name);
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
/* VPORT counters */
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_stats_desc[i].name);
+ vport_stats_desc[i].format);
/* PPORT counters */
for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_802_3_stats_desc[i].name);
+ pport_802_3_stats_desc[i].format);
for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2863_stats_desc[i].name);
+ pport_2863_stats_desc[i].format);
for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2819_stats_desc[i].name);
+ pport_2819_stats_desc[i].format);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
- prio,
- pport_per_prio_traffic_stats_desc[i].name);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_traffic_stats_desc[i].format, prio);
}
pfc_combined = mlx5e_query_pfc_combined(priv);
for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
- prio, pport_per_prio_pfc_stats_desc[i].name);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, prio);
}
}
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i,
- rq_stats_desc[j].name);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_stats_desc[j].format, i);
for (tc = 0; tc < priv->params.num_tc; tc++)
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
- "tx%d_%s",
- priv->channeltc_to_txq_map[i][tc],
- sq_stats_desc[j].name);
+ sq_stats_desc[j].format,
+ priv->channeltc_to_txq_map[i][tc]);
}
static void mlx5e_get_strings(struct net_device *dev,
#include "eswitch.h"
#include "vxlan.h"
+enum {
+ MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
+ MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
+ MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
+ MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
+};
+
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
port_state = mlx5_query_vport_state(mdev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
- if (port_state == VPORT_STATE_UP)
+ if (port_state == VPORT_STATE_UP) {
+ netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
- else
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
+ }
}
static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
+static void mlx5e_tx_timeout_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ tx_timeout_work);
+ int err;
+
+ rtnl_lock();
+ mutex_lock(&priv->state_lock);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+ mlx5e_close_locked(priv->netdev);
+ err = mlx5e_open_locked(priv->netdev);
+ if (err)
+ netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+ err);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
+}
+
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
- s->lro_packets += rq_stats->lro_packets;
- s->lro_bytes += rq_stats->lro_bytes;
+ s->rx_lro_packets += rq_stats->lro_packets;
+ s->rx_lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
- s->rx_csum_sw += rq_stats->csum_sw;
- s->rx_csum_inner += rq_stats->csum_inner;
+ s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
- s->tso_packets += sq_stats->tso_packets;
- s->tso_bytes += sq_stats->tso_bytes;
- s->tso_inner_packets += sq_stats->tso_inner_packets;
- s->tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_tso_packets += sq_stats->tso_packets;
+ s->tx_tso_bytes += sq_stats->tso_bytes;
+ s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+ s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
- s->tx_csum_inner += sq_stats->csum_offload_inner;
- tx_offload_none += sq_stats->csum_offload_none;
+ s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+ tx_offload_none += sq_stats->csum_none;
}
}
/* Update calculated offload counters */
- s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
- s->rx_csum_good = s->rx_packets - s->rx_csum_none -
- s->rx_csum_sw;
+ s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+ s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
- s->link_down_events = MLX5_GET(ppcnt_reg,
+ s->link_down_events_phy = MLX5_GET(ppcnt_reg,
priv->stats.pport.phy_counters,
counter_set.phys_layer_cntrs.link_down_events);
}
{
struct mlx5e_priv *priv = vpriv;
- if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
switch (event) {
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
- set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
- clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
}
rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
}
rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
rq->wqe_sz = (priv->params.lro_en) ?
priv->params.lro_wqe_sz :
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
+ int tout = 0;
+ int err;
+
clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
- mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
- while (!mlx5_wq_ll_is_empty(&rq->wq))
- msleep(20);
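+ /* Bound the wait for the firmware to drain the RQ; on error or
+ * timeout, mark the RQ so the NAPI poll stops touching it and the
+ * remaining descriptors are reclaimed by mlx5e_free_rx_descs().
+ */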
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+ while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
+ tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
+ msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+
+ if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
+ set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
napi_synchronize(&rq->channel->napi);
mlx5e_disable_rq(rq);
+ mlx5e_free_rx_descs(rq);
mlx5e_destroy_rq(rq);
}
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
int err;
- err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
+ err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
if (err)
return err;
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
+ int tout = 0;
+ int err;
+
if (sq->txq) {
clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
/* prevent netif_tx_wake_queue */
if (mlx5e_sq_has_room_for(sq, 1))
mlx5e_send_nop(sq, true);
- mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_ERR);
+ if (err)
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}
- while (sq->cc != sq->pc) /* wait till sq is empty */
- msleep(20);
+ /* wait till sq is empty, unless a TX timeout occurred on this SQ */
+ while (sq->cc != sq->pc &&
+ !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
+ msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+ if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ }
/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
napi_synchronize(&sq->channel->napi);
+ mlx5e_free_tx_descs(sq);
mlx5e_disable_sq(sq);
mlx5e_destroy_sq(sq);
}
goto err_close_channels;
}
+ /* FIXME: This is a W/A for tx timeout watchdog false alarm when
+ * polling for inactive tx queues.
+ */
+ netif_tx_start_all_queues(priv->netdev);
+
kfree(cparam);
return 0;
{
int i;
+ /* FIXME: This is a W/A only for tx timeout watchdog false alarm when
+ * polling for inactive tx queues.
+ */
+ netif_tx_stop_all_queues(priv->netdev);
+ netif_tx_disable(priv->netdev);
+
for (i = 0; i < priv->params.num_channels; i++)
mlx5e_close_channel(priv->channel[i]);
netdev_set_num_tc(netdev, ntc);
+ /* Map netdev TCs to offset 0
+ * We have our own UP to TXQ mapping for QoS
+ */
for (tc = 0; tc < ntc; tc++)
- netdev_set_tc_queue(netdev, tc, nch, tc * nch);
+ netdev_set_tc_queue(netdev, tc, nch, 0);
}
int mlx5e_open_locked(struct net_device *netdev)
return features;
}
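+/* ndo_tx_timeout handler: flag and log every stopped SQ, then defer the
+ * actual recovery (closing and reopening the channels) to tx_timeout_work.
+ */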
+static void mlx5e_tx_timeout(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ bool sched_work = false;
+ int i;
+
+ netdev_err(dev, "TX timeout detected\n");
+
+ for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
+ struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
+
+ if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+ continue;
+ sched_work = true;
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
+ i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+ }
+
+ if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+ schedule_work(&priv->tx_timeout_work);
+}
+
static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
+ .ndo_tx_timeout = mlx5e_tx_timeout,
};
static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
+ .ndo_tx_timeout = mlx5e_tx_timeout,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
flush_workqueue(priv->wq);
if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
netif_device_detach(netdev);
- mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_close_locked(netdev);
- mutex_unlock(&priv->state_lock);
+ mlx5e_close(netdev);
} else {
unregister_netdev(netdev);
}
return -ENOMEM;
}
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct sk_buff *skb = rq->skb[ix];
+
+ if (skb) {
+ rq->skb[ix] = NULL;
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+}
+
static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
{
return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
return 0;
}
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+
+ wi->free_wqe(rq, wi);
+}
+
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5e_rx_wqe *wqe;
+ __be16 wqe_ix_be;
+ u16 wqe_ix;
+
+ while (!mlx5_wq_ll_is_empty(wq)) {
+ wqe_ix_be = *wq->tail_next;
+ wqe_ix = be16_to_cpu(wqe_ix_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+ rq->dealloc_wqe(rq, wqe_ix);
+ mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+ &wqe->next.next_wqe_index);
+ }
+}
+
#define RQ_CANNOT_POST(rq) \
(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
if (is_first_ethertype_ip(skb)) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- rq->stats.csum_sw++;
+ rq->stats.csum_complete++;
return;
}
if (cqe_is_tunneled(cqe)) {
skb->csum_level = 1;
skb->encapsulation = 1;
- rq->stats.csum_inner++;
+ rq->stats.csum_unnecessary_inner++;
}
return;
}
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
int work_done = 0;
+ if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+ return 0;
+
if (cq->decmprs_left)
work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
struct counter_desc {
- char name[ETH_GSTRING_LEN];
+ char format[ETH_GSTRING_LEN];
int offset; /* Byte offset */
};
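Renaming counter_desc.name to .format signals that the string is now a printf template: the RX/TX variants embed a %d that ethtool string assembly fills with the queue index, producing names like rx3_packets. A runnable sketch of that expansion, with the descriptor table reduced to illustrative stand-ins:

#include <stdio.h>

#define ETH_GSTRING_LEN 32	/* kernel value, used here for sizing */

struct counter_desc {
	char format[ETH_GSTRING_LEN];	/* template, e.g. "rx%d_packets" */
	int offset;
};

static const struct counter_desc rq_stats_desc[] = {
	{ "rx%d_packets", 0 },
	{ "rx%d_bytes",   8 },
};

int main(void)
{
	char name[ETH_GSTRING_LEN];
	int q, i;

	for (q = 0; q < 2; q++)
		for (i = 0; i < 2; i++) {
			snprintf(name, sizeof(name),
				 rq_stats_desc[i].format, q);
			printf("%s\n", name); /* rx0_packets, rx0_bytes, ... */
		}
	return 0;
}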
u64 rx_bytes;
u64 tx_packets;
u64 tx_bytes;
- u64 tso_packets;
- u64 tso_bytes;
- u64 tso_inner_packets;
- u64 tso_inner_bytes;
- u64 lro_packets;
- u64 lro_bytes;
- u64 rx_csum_good;
+ u64 tx_tso_packets;
+ u64 tx_tso_bytes;
+ u64 tx_tso_inner_packets;
+ u64 tx_tso_inner_bytes;
+ u64 rx_lro_packets;
+ u64 rx_lro_bytes;
+ u64 rx_csum_unnecessary;
u64 rx_csum_none;
- u64 rx_csum_sw;
- u64 rx_csum_inner;
- u64 tx_csum_offload;
- u64 tx_csum_inner;
+ u64 rx_csum_complete;
+ u64 rx_csum_unnecessary_inner;
+ u64 tx_csum_partial;
+ u64 tx_csum_partial_inner;
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 rx_cqe_compress_pkts;
/* Special handling counters */
- u64 link_down_events;
+ u64 link_down_events_phy;
};
static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
struct mlx5e_qcounter_stats {
};
static const struct counter_desc vport_stats_desc[] = {
- { "rx_vport_error_packets",
- VPORT_COUNTER_OFF(received_errors.packets) },
- { "rx_vport_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) },
- { "tx_vport_error_packets",
- VPORT_COUNTER_OFF(transmit_errors.packets) },
- { "tx_vport_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) },
{ "rx_vport_unicast_packets",
VPORT_COUNTER_OFF(received_eth_unicast.packets) },
{ "rx_vport_unicast_bytes",
};
static const struct counter_desc pport_802_3_stats_desc[] = {
- { "frames_tx", PPORT_802_3_OFF(a_frames_transmitted_ok) },
- { "frames_rx", PPORT_802_3_OFF(a_frames_received_ok) },
- { "check_seq_err", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
- { "alignment_err", PPORT_802_3_OFF(a_alignment_errors) },
- { "octets_tx", PPORT_802_3_OFF(a_octets_transmitted_ok) },
- { "octets_received", PPORT_802_3_OFF(a_octets_received_ok) },
- { "multicast_xmitted", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
- { "broadcast_xmitted", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
- { "multicast_rx", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
- { "broadcast_rx", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
- { "in_range_len_errors", PPORT_802_3_OFF(a_in_range_length_errors) },
- { "out_of_range_len", PPORT_802_3_OFF(a_out_of_range_length_field) },
- { "too_long_errors", PPORT_802_3_OFF(a_frame_too_long_errors) },
- { "symbol_err", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
- { "mac_control_tx", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
- { "mac_control_rx", PPORT_802_3_OFF(a_mac_control_frames_received) },
- { "unsupported_op_rx",
- PPORT_802_3_OFF(a_unsupported_opcodes_received) },
- { "pause_ctrl_rx", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
- { "pause_ctrl_tx",
- PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+ { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+ { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+ { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+ { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+ { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+ { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+ { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+ { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+ { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+ { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+ { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+ { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+ { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+ { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+ { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+ { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+ { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+ { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};
static const struct counter_desc pport_2863_stats_desc[] = {
- { "in_octets", PPORT_2863_OFF(if_in_octets) },
- { "in_ucast_pkts", PPORT_2863_OFF(if_in_ucast_pkts) },
- { "in_discards", PPORT_2863_OFF(if_in_discards) },
- { "in_errors", PPORT_2863_OFF(if_in_errors) },
- { "in_unknown_protos", PPORT_2863_OFF(if_in_unknown_protos) },
- { "out_octets", PPORT_2863_OFF(if_out_octets) },
- { "out_ucast_pkts", PPORT_2863_OFF(if_out_ucast_pkts) },
- { "out_discards", PPORT_2863_OFF(if_out_discards) },
- { "out_errors", PPORT_2863_OFF(if_out_errors) },
- { "in_multicast_pkts", PPORT_2863_OFF(if_in_multicast_pkts) },
- { "in_broadcast_pkts", PPORT_2863_OFF(if_in_broadcast_pkts) },
- { "out_multicast_pkts", PPORT_2863_OFF(if_out_multicast_pkts) },
- { "out_broadcast_pkts", PPORT_2863_OFF(if_out_broadcast_pkts) },
+ { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+ { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+ { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};
static const struct counter_desc pport_2819_stats_desc[] = {
- { "drop_events", PPORT_2819_OFF(ether_stats_drop_events) },
- { "octets", PPORT_2819_OFF(ether_stats_octets) },
- { "pkts", PPORT_2819_OFF(ether_stats_pkts) },
- { "broadcast_pkts", PPORT_2819_OFF(ether_stats_broadcast_pkts) },
- { "multicast_pkts", PPORT_2819_OFF(ether_stats_multicast_pkts) },
- { "crc_align_errors", PPORT_2819_OFF(ether_stats_crc_align_errors) },
- { "undersize_pkts", PPORT_2819_OFF(ether_stats_undersize_pkts) },
- { "oversize_pkts", PPORT_2819_OFF(ether_stats_oversize_pkts) },
- { "fragments", PPORT_2819_OFF(ether_stats_fragments) },
- { "jabbers", PPORT_2819_OFF(ether_stats_jabbers) },
- { "collisions", PPORT_2819_OFF(ether_stats_collisions) },
- { "p64octets", PPORT_2819_OFF(ether_stats_pkts64octets) },
- { "p65to127octets", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
- { "p128to255octets", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
- { "p256to511octets", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
- { "p512to1023octets", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
- { "p1024to1518octets",
- PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
- { "p1519to2047octets",
- PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
- { "p2048to4095octets",
- PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
- { "p4096to8191octets",
- PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
- { "p8192to10239octets",
- PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+ { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+ { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+ { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+ { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+ { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+ { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+ { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+ { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+ { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+ { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+ { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+ { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+ { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
- { "rx_octets", PPORT_PER_PRIO_OFF(rx_octets) },
- { "rx_frames", PPORT_PER_PRIO_OFF(rx_frames) },
- { "tx_octets", PPORT_PER_PRIO_OFF(tx_octets) },
- { "tx_frames", PPORT_PER_PRIO_OFF(tx_frames) },
+ { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+ { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+ { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+ { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
- { "rx_pause", PPORT_PER_PRIO_OFF(rx_pause) },
- { "rx_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
- { "tx_pause", PPORT_PER_PRIO_OFF(tx_pause) },
- { "tx_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
- { "rx_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+ { "rx_prio%d_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+ { "rx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+ { "tx_prio%d_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+ { "tx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+ { "rx_prio%d_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
- u64 csum_sw;
- u64 csum_inner;
+ u64 csum_complete;
+ u64 csum_unnecessary_inner;
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
};
static const struct counter_desc rq_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
};
struct mlx5e_sq_stats {
u64 tso_bytes;
u64 tso_inner_packets;
u64 tso_inner_bytes;
- u64 csum_offload_inner;
+ u64 csum_partial_inner;
u64 nop;
/* less likely accessed in data path */
- u64 csum_offload_none;
+ u64 csum_none;
u64 stopped;
u64 wake;
u64 dropped;
};
static const struct counter_desc sq_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, nop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, stopped) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, wake) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
};
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
- int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
- skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
+ int up = 0;
+
+ if (!netdev_get_num_tc(dev))
+ return channel_ix;
+
+ if (skb_vlan_tag_present(skb))
+ up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+
+ /* channel_ix can be larger than num_channels since
+ * dev->num_real_tx_queues = num_channels * num_tc
+ */
+ if (channel_ix >= priv->params.num_channels)
+ channel_ix = reciprocal_scale(channel_ix,
+ priv->params.num_channels);
return priv->channeltc_to_txq_map[channel_ix][up];
}
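reciprocal_scale() folds a 32-bit value into [0, ep_ro) without a division: multiply by the range and keep the high 32 bits, roughly val * range / 2^32 (uniform only when the input spans the full 32-bit space). A runnable sketch of the same arithmetic; the kernel helper lives in linux/kernel.h, and this re-implementation is for illustration:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel's reciprocal_scale(). */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	printf("%u\n", reciprocal_scale(9, 4));           /* prints 0 */
	printf("%u\n", reciprocal_scale(0xC0000000u, 4)); /* prints 3 */
	return 0;
}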
* headers and occur before the data gather.
* Therefore these headers must be copied into the WQE
*/
-#define MLX5E_MIN_INLINE ETH_HLEN
+#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
if (bf) {
u16 ihs = skb_headlen(skb);
return skb_headlen(skb);
}
- return MLX5E_MIN_INLINE;
+ return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
if (skb->encapsulation) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
MLX5_ETH_WQE_L4_INNER_CSUM;
- sq->stats.csum_offload_inner++;
+ sq->stats.csum_partial_inner++;
} else {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
}
} else
- sq->stats.csum_offload_none++;
+ sq->stats.csum_none++;
if (sq->cc != sq->prev_cc) {
sq->prev_cc = sq->cc;
while ((sq->pc & wq->sz_m1) > sq->edge)
mlx5e_send_nop(sq, false);
- sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+ if (bf)
+ sq->bf_budget--;
sq->stats.packets++;
sq->stats.bytes += num_bytes;
return mlx5e_sq_xmit(sq, skb);
}
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+ struct mlx5e_tx_wqe_info *wi;
+ struct sk_buff *skb;
+ u16 ci;
+ int i;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+ wi = &sq->wqe_info[ci];
+
+ if (!skb) { /* nop */
+ sq->cc++;
+ continue;
+ }
+
+ for (i = 0; i < wi->num_dma; i++) {
+ struct mlx5e_sq_dma *dma =
+ mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ }
+
+ dev_kfree_skb_any(skb);
+ sq->cc += wi->num_wqebbs;
+ }
+}
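mlx5e_free_tx_descs walks the ring with free-running 16-bit counters: cc and pc only ever increase, and `cc & sq->wq.sz_m1` folds them into a slot index, which is why the ring size must be a power of two (sz_m1 = size - 1 is then an all-ones mask). A runnable sketch of that indexing, with the ring size chosen arbitrarily:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)	/* the sz_m1 equivalent */

int main(void)
{
	uint16_t cc;

	/* counters run free; the mask folds them into slots */
	for (cc = 6; cc != 10; cc++)
		printf("counter %u -> slot %u\n", cc, cc & RING_MASK);
	/* counter 6 -> slot 6 ... counter 9 -> slot 1 */
	return 0;
}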
+
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
sq = container_of(cq, struct mlx5e_sq, cq);
+ if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+ return false;
+
npkts = 0;
nbytes = 0;
match_v,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
- if (IS_ERR_OR_NULL(flow_rule)) {
+ if (IS_ERR(flow_rule)) {
pr_warn(
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
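The IS_ERR_OR_NULL() to IS_ERR() conversions in these hunks reflect the flow-steering API contract: the constructors return either a valid pointer or ERR_PTR(err), never NULL, and treating NULL as an error would feed PTR_ERR(NULL) == 0 into the error path. A runnable sketch of the idiom with minimal userspace stand-ins for the ERR_PTR machinery:

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *create_rule(int fail)
{
	static int rule;		/* stand-in object */
	return fail ? ERR_PTR(-22) : &rule;
}

int main(void)
{
	void *rule = create_rule(1);

	if (IS_ERR(rule)) {		/* NULL is not a possible return */
		printf("err %ld\n", PTR_ERR(rule));
		return 1;
	}
	return 0;
}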
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
- if (IS_ERR_OR_NULL(fdb)) {
+ if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
goto out;
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
eth_broadcast_addr(dmac);
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create flow group err(%d)\n", err);
goto out;
eth_zero_addr(dmac);
dmac[0] = 0x01;
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
goto out;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
goto out;
}
}
- kfree(flow_group_in);
+ kvfree(flow_group_in);
return err;
}
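The kfree() to kvfree() switches pair the free with the allocation: flow_group_in comes from a kmalloc-with-vmalloc-fallback helper, so it may live in vmalloc space, and only kvfree() is safe for either case. A hedged kernel-side sketch of the pairing; kvzalloc() stands in for the mlx5_vzalloc() helper these files use:

#include <linux/mm.h>
#include <linux/slab.h>

static int example_cmd(size_t inlen)
{
	/* may return kmalloc or vmalloc memory; assumption: kvzalloc()
	 * as the stand-in for the driver's mlx5_vzalloc() */
	void *in = kvzalloc(inlen, GFP_KERNEL);

	if (!in)
		return -ENOMEM;
	/* ... fill and execute the command ... */
	kvfree(in);	/* correct for either allocator */
	return 0;
}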
esw_fdb_set_vport_rule(esw,
mac,
vport_idx);
+ iter_vaddr->mc_promisc = true;
break;
case MLX5_ACTION_DEL:
if (!iter_vaddr)
return;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR_OR_NULL(acl)) {
+ if (IS_ERR(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
vport->vport, err);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(vlan_grp)) {
+ if (IS_ERR(vlan_grp)) {
err = PTR_ERR(vlan_grp);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
vport->vport, err);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
drop_grp = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(drop_grp)) {
+ if (IS_ERR(drop_grp)) {
err = PTR_ERR(drop_grp);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
vport->vport, err);
vport->egress.drop_grp = drop_grp;
vport->egress.allowed_vlans_grp = vlan_grp;
out:
- kfree(flow_group_in);
+ kvfree(flow_group_in);
if (err && !IS_ERR_OR_NULL(vlan_grp))
mlx5_destroy_flow_group(vlan_grp);
if (err && !IS_ERR_OR_NULL(acl))
return;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR_OR_NULL(acl)) {
+ if (IS_ERR(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
vport->vport, err);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
vport->vport, err);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
vport->vport, err);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
vport->vport, err);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
vport->vport, err);
mlx5_destroy_flow_table(vport->ingress.acl);
}
- kfree(flow_group_in);
+ kvfree(flow_group_in);
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
match_v,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL);
- if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+ if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
vport->vport, err);
match_v,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL);
- if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
+ if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
vport->vport, err);
match_v,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL);
- if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+ if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
vport->vport, err);
match_v,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL);
- if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
+ if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
vport->vport, err);
/* Sync with current vport context */
vport->enabled_events = enable_events;
- esw_vport_change_handle_locked(vport);
-
vport->enabled = true;
/* only PF is trusted by default */
vport->trusted = (vport_num) ? false : true;
-
- arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
+ esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+ ((u8 *)node_guid)[7] = mac[0];
+ ((u8 *)node_guid)[6] = mac[1];
+ ((u8 *)node_guid)[5] = mac[2];
+ ((u8 *)node_guid)[4] = 0xff;
+ ((u8 *)node_guid)[3] = 0xfe;
+ ((u8 *)node_guid)[2] = mac[3];
+ ((u8 *)node_guid)[1] = mac[4];
+ ((u8 *)node_guid)[0] = mac[5];
+}
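node_guid_gen_from_mac builds the GUID the usual EUI-64 way: split the MAC in half and insert 0xff, 0xfe between the halves (note it does not flip the universal/local bit). Byte [7] of the u64 is the most significant GUID byte on a little-endian host, so MAC 00:11:22:33:44:55 yields node GUID 00:11:22:ff:fe:33:44:55. A runnable check of that layout, assuming a little-endian host for the printed value:

#include <stdint.h>
#include <stdio.h>

static void node_guid_gen_from_mac(uint64_t *node_guid, const uint8_t mac[6])
{
	((uint8_t *)node_guid)[7] = mac[0];
	((uint8_t *)node_guid)[6] = mac[1];
	((uint8_t *)node_guid)[5] = mac[2];
	((uint8_t *)node_guid)[4] = 0xff;
	((uint8_t *)node_guid)[3] = 0xfe;
	((uint8_t *)node_guid)[2] = mac[3];
	((uint8_t *)node_guid)[1] = mac[4];
	((uint8_t *)node_guid)[0] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint64_t guid = 0;

	node_guid_gen_from_mac(&guid, mac);
	/* on a little-endian host this prints 001122fffe334455 */
	printf("%016llx\n", (unsigned long long)guid);
	return 0;
}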
+
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
- int err = 0;
struct mlx5_vport *evport;
+ u64 node_guid;
+ int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
return err;
}
+ node_guid_gen_from_mac(&node_guid, mac);
+ err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+ if (err)
+ mlx5_core_warn(esw->dev,
+ "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
+ vport, err);
+
mutex_lock(&esw->state_lock);
if (evport->enabled)
err = esw_vport_ingress_config(esw, evport);
mutex_unlock(&esw->state_lock);
-
return err;
}
ft->id);
return err;
}
- root->root_ft = new_root_ft;
}
+ root->root_ft = new_root_ft;
return 0;
}
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return;
+
cleanup_root_ns(dev);
cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
{
int err = 0;
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
err = mlx5_init_fc_stats(dev);
if (err)
return err;
- if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+ if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
err = init_root_ns(dev);
if (err)
goto err;
}
+
if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
- err = init_fdb_root_ns(dev);
- if (err)
- goto err;
- }
- if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = init_egress_acl_root_ns(dev);
- if (err)
- goto err;
- }
- if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = init_ingress_acl_root_ns(dev);
- if (err)
- goto err;
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+ err = init_fdb_root_ns(dev);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+ err = init_egress_acl_root_ns(dev);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+ err = init_ingress_acl_root_ns(dev);
+ if (err)
+ goto err;
+ }
}
return 0;
void mlx5_enter_error_state(struct mlx5_core_dev *dev)
{
+ mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
- return;
+ goto unlock;
mlx5_core_err(dev, "start\n");
- if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+ if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ trigger_cmd_completions(dev);
+ }
mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
mlx5_core_err(dev, "end\n");
+
+unlock:
+ mutex_unlock(&dev->intf_state_mutex);
}
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
u32 count;
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- trigger_cmd_completions(dev);
mod_timer(&health->timer, get_next_poll_jiffies());
return;
}
mlx5_pci_err_detected(dev->pdev, 0);
}
-/* wait for the device to show vital signs. For now we check
- * that we can read the device ID and that the health buffer
- * shows a non zero value which is different than 0xffffffff
+/* wait for the device to show vital signs by waiting
+ * for the health counter to start counting.
*/
-static void wait_vital(struct pci_dev *pdev)
+static int wait_vital(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_health *health = &dev->priv.health;
const int niter = 100;
+ u32 last_count = 0;
u32 count;
- u16 did;
int i;
- /* Wait for firmware to be ready after reset */
- msleep(1000);
- for (i = 0; i < niter; i++) {
- if (pci_read_config_word(pdev, 2, &did)) {
- dev_warn(&pdev->dev, "failed reading config word\n");
- break;
- }
- if (did == pdev->device) {
- dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
- break;
- }
- msleep(50);
- }
- if (i == niter)
- dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
-
for (i = 0; i < niter; i++) {
count = ioread32be(health->health_counter);
if (count && count != 0xffffffff) {
- dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
- break;
+ if (last_count && last_count != count) {
+ dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+ return 0;
+ }
+ last_count = count;
}
msleep(50);
}
- if (i == niter)
- dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+ return -ETIMEDOUT;
}
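wait_vital() now declares the device alive only after two successive valid reads of the health counter differ, proving firmware is actually ticking rather than the register merely holding a stale nonzero value, and it returns -ETIMEDOUT so the caller can abort the resume. A runnable sketch of that two-reads liveness test:

#include <stdint.h>
#include <stdio.h>

/* Liveness check in the spirit of wait_vital(): a counter is "alive"
 * only once two valid reads differ, not merely once it is nonzero. */
static int counter_alive(uint32_t (*read)(void), int max_iter)
{
	uint32_t last = 0, cur;
	int i;

	for (i = 0; i < max_iter; i++) {
		cur = read();
		if (cur && cur != 0xffffffffu) {
			if (last && last != cur)
				return 1;	/* counter is ticking */
			last = cur;
		}
		/* msleep(50) in the driver; omitted here */
	}
	return 0;	/* timed out */
}

static uint32_t fake_health_counter(void)
{
	static uint32_t v = 41;
	return ++v;	/* simulates a firmware heartbeat */
}

int main(void)
{
	printf("alive: %d\n", counter_alive(fake_health_counter, 100));
	return 0;
}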
static void mlx5_pci_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "%s was called\n", __func__);
pci_save_state(pdev);
- wait_vital(pdev);
+ err = wait_vital(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+ return;
+ }
err = mlx5_load_one(dev, priv);
if (err)
{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
{ PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
- { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */
+ { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */
{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
+ { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */
{ 0, }
};
func_id, npages, err);
goto out_4k;
}
- dev->priv.fw_pages += npages;
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
return err;
}
+static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_manage_pages_inbox *in, int in_size,
+ struct mlx5_manage_pages_outbox *out, int out_size)
+{
+ struct fw_page *fwp;
+ struct rb_node *p;
+ u32 npages;
+ u32 i = 0;
+
+ if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
+ (u32 *)out, out_size);
+
+ npages = be32_to_cpu(in->num_entries);
+
+ p = rb_first(&dev->priv.page_root);
+ while (p && i < npages) {
+ fwp = rb_entry(p, struct fw_page, rb_node);
+ out->pas[i] = cpu_to_be64(fwp->addr);
+ p = rb_next(p);
+ i++;
+ }
+
+ out->num_entries = cpu_to_be32(i);
+ return 0;
+}
+
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
in.func_id = cpu_to_be16(func_id);
in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
+ err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
if (err) {
- mlx5_core_err(dev, "failed reclaiming pages\n");
- goto out_free;
- }
- dev->priv.fw_pages -= npages;
-
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
+ mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
err = -EINVAL;
goto out_free;
}
- if (nclaimed)
- *nclaimed = num_claimed;
for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
free_4k(dev, addr);
}
+
+ if (nclaimed)
+ *nclaimed = num_claimed;
+
dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- free_4k(dev, fwp->addr);
- nclaimed = 1;
- } else {
- err = reclaim_pages(dev, fwp->func_id,
- optimal_reclaimed_pages(),
- &nclaimed);
- }
+ err = reclaim_pages(dev, fwp->func_id,
+ optimal_reclaimed_pages(),
+ &nclaimed);
+
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
err);
}
} while (p);
+ WARN(dev->priv.fw_pages,
+ "FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.fw_pages);
+ WARN(dev->priv.vfs_pages,
+ "VFs FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.vfs_pages);
+
return 0;
}
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
else
- *xrcdn = be32_to_cpu(out.xrcdn);
+ *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
return err;
}
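The xrcdn fix masks the returned dword to its low 24 bits: the XRC domain number is a 24-bit field, so the top byte of the dword is reserved and must not leak into the domain number. A runnable illustration of the extraction:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* suppose firmware returned this dword, already cpu-endian;
	 * the top byte (0xAB) is not part of the 24-bit xrcdn field */
	uint32_t raw = 0xAB123456u;
	uint32_t xrcdn = raw & 0xffffff;

	printf("xrcdn = 0x%06x\n", xrcdn); /* prints 0x123456 */
	return 0;
}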
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 node_guid)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *nic_vport_context;
+ void *in;
+ int err;
+
+ if (!vport)
+ return -EINVAL;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EACCES;
+ if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+ return -ENOTSUPP;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.node_guid, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+
+ nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr)
{
u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
struct mlx5e_vxlan *vxlan;
int err;
+ if (mlx5e_vxlan_lookup_port(priv, port))
+ goto free_work;
+
if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
goto free_work;
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
* Configures the switch priority to buffer table.
*/
#define MLXSW_REG_PPTB_ID 0x500B
-#define MLXSW_REG_PPTB_LEN 0x0C
+#define MLXSW_REG_PPTB_LEN 0x10
static const struct mlxsw_reg_info mlxsw_reg_pptb = {
.id = MLXSW_REG_PPTB_ID,
*/
MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
+/* reg_pptb_prio_to_buff_msb
+ * Mapping of switch priority <i+8> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
+
#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
mlxsw_reg_pptb_local_port_set(payload, local_port);
mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+ mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+}
+
+static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
+ u8 buff)
+{
+ mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
+ mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
}
/* PBMC - Port Buffer Management Control Register
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
-static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
- bool *p_is_up)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char paos_pl[MLXSW_REG_PAOS_LEN];
- u8 oper_status;
- int err;
-
- mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
- if (err)
- return err;
- oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
- *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
- return 0;
-}
-
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned char *addr)
{
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 swid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pspa_pl[MLXSW_REG_PSPA_LEN];
- mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
+ swid);
+}
+
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable)
{
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
-static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *p_module,
- u8 *p_width, u8 *p_lane)
+static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 *p_module,
+ u8 *p_width, u8 *p_lane)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
return 0;
}
-static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *p_module,
- u8 *p_width)
-{
- u8 lane;
-
- return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
- p_width, &lane);
-}
-
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 module, u8 width, u8 lane)
{
}
mlxsw_sp_txhdr_construct(skb, &tx_info);
- len = skb->len;
+ /* TX header is consumed by HW on the way so we shouldn't count its
+ * bytes as being sent.
+ */
+ len = skb->len - MLXSW_TXHDR_LEN;
+
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
size_t len)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- u8 module, width, lane;
+ u8 module = mlxsw_sp_port->mapping.module;
+ u8 width = mlxsw_sp_port->mapping.width;
+ u8 lane = mlxsw_sp_port->mapping.lane;
int err;
- err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->local_port,
- &module, &width, &lane);
- if (err) {
- netdev_err(dev, "Failed to retrieve module information\n");
- return err;
- }
-
if (!mlxsw_sp_port->split)
err = snprintf(name, len, "p%d", module + 1);
else
cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause |
+ SUPPORTED_Autoneg;
cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
eth_proto_oper, cmd);
u32 eth_proto_new;
u32 eth_proto_cap;
u32 eth_proto_admin;
- bool is_up;
int err;
speed = ethtool_cmd_speed(cmd);
return err;
}
- err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
- if (err) {
- netdev_err(dev, "Failed to get oper status");
- return err;
- }
- if (!is_up)
+ if (!netif_running(dev))
return 0;
err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
return 0;
}
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width)
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width, u8 lane)
{
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->split = split;
+ mlxsw_sp_port->mapping.module = module;
+ mlxsw_sp_port->mapping.width = width;
+ mlxsw_sp_port->mapping.lane = lane;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
return err;
}
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
-{
- int err;
-
- err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
- lane);
- if (err)
- return err;
-
- err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
- width);
- if (err)
- goto err_port_create;
-
- return 0;
-
-err_port_create:
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
- return err;
-}
-
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct net_device *dev = mlxsw_sp_port->dev;
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
+ u8 module, width, lane;
size_t alloc_size;
- u8 module, width;
int i;
int err;
for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
- &width);
+ &width, &lane);
if (err)
goto err_port_module_info_get;
if (!width)
continue;
mlxsw_sp->port_to_module[i] = module;
- err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
+ lane);
if (err)
goto err_port_create;
}
return local_port - offset;
}
+static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+ u8 module, unsigned int count)
+{
+ u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+ int err, i;
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
+ width, i * width);
+ if (err)
+ goto err_port_module_map;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
+ if (err)
+ goto err_port_swid_set;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
+ module, width, i * width);
+ if (err)
+ goto err_port_create;
+ }
+
+ return 0;
+
+err_port_create:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ i = count;
+err_port_swid_set:
+ for (i--; i >= 0; i--)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+ i = count;
+err_port_module_map:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
+ return err;
+}
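mlxsw_sp_port_split_create runs three full passes (module map, swid set, port create) and unwinds them in reverse on failure; the `i = count;` resets before each earlier-stage label make the rollback loops undo every iteration of a stage that had fully completed. A compact runnable sketch of that staged-unwind shape with the stages abstracted away:

#include <stdio.h>

/* Illustrative stage handlers; stage_b fails on the last item. */
static int stage_a(int i) { (void)i; return 0; }
static int stage_b(int i) { return i == 3 ? -1 : 0; }
static void undo_a(int i) { printf("undo a[%d]\n", i); }
static void undo_b(int i) { printf("undo b[%d]\n", i); }

static int staged_create(int count)
{
	int err, i;

	for (i = 0; i < count; i++)
		if ((err = stage_a(i)))
			goto err_stage_a;
	for (i = 0; i < count; i++)
		if ((err = stage_b(i)))
			goto err_stage_b;
	return 0;

err_stage_b:
	for (i--; i >= 0; i--)
		undo_b(i);
	i = count;	/* stage_a fully completed; undo all of it */
err_stage_a:
	for (i--; i >= 0; i--)
		undo_a(i);
	return err;
}

int main(void)
{
	return staged_create(4) ? 1 : 0;
}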
+
+static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
+ u8 base_port, unsigned int count)
+{
+ u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+ int i;
+
+ /* Split by four means we need to re-create two ports, otherwise
+ * only one.
+ */
+ count = count / 2;
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
+ 0);
+ }
+
+ for (i = 0; i < count; i++)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
+ width, 0);
+ }
+}
+
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
unsigned int count)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
u8 module, cur_width, base_port;
int i;
int err;
return -EINVAL;
}
+ module = mlxsw_sp_port->mapping.module;
+ cur_width = mlxsw_sp_port->mapping.width;
+
if (count != 2 && count != 4) {
netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
-
if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
return -EINVAL;
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
- module, width, i * width);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
- goto err_port_create;
- }
+ err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
+ goto err_port_split_create;
}
return 0;
-err_port_create:
- for (i--; i >= 0; i--)
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
- }
+err_port_split_create:
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return err;
}
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 module, cur_width, base_port;
+ u8 cur_width, base_port;
unsigned int count;
int i;
- int err;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
+ cur_width = mlxsw_sp_port->mapping.width;
count = cur_width == 1 ? 4 : 2;
base_port = mlxsw_sp_cluster_base_port_get(local_port);
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH,
- 0);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
- }
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return 0;
}
struct ieee_maxrate *maxrate;
struct ieee_pfc *pfc;
} dcb;
+ struct {
+ u8 module;
+ u8 width;
+ u8 lane;
+ } mapping;
/* 802.1Q bridge VLANs */
unsigned long *active_vlans;
unsigned long *untagged_vlans;
mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, 0);
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
pptb_pl);
}
mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, prio_tc[i]);
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
+
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
pptb_pl);
}
return err;
memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
+ mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
return 0;
}
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err;
- if (mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) {
+ if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
+ pfc->pfc_en) {
netdev_err(dev, "PAUSE frames already enabled on port\n");
return -EINVAL;
}
}
memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
+ mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
return 0;
}
}
mlxsw_sx_txhdr_construct(skb, &tx_info);
- len = skb->len;
+ /* TX header is consumed by HW on the way so we shouldn't count its
+ * bytes as being sent.
+ */
+ len = skb->len - MLXSW_TXHDR_LEN;
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
enc28j60_phy_read(priv, PHIR);
}
/* TX complete handler */
- if ((intflags & EIR_TXIF) != 0) {
+ if (((intflags & EIR_TXIF) != 0) &&
+ ((intflags & EIR_TXERIF) == 0)) {
bool err = false;
loop++;
if (netif_msg_intr(priv))
enc28j60_tx_clear(ndev, true);
} else
enc28j60_tx_clear(ndev, true);
- locked_reg_bfclr(priv, EIR, EIR_TXERIF);
+ locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
}
/* RX Error handler */
if ((intflags & EIR_RXERIF) != 0) {
*/
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
+ BUG_ON(!priv->tx_skb);
+
if (netif_msg_tx_queued(priv))
printk(KERN_DEBUG DRV_NAME
": Tx Packet Len:%d\n", priv->tx_skb->len);
netif_tx_wake_all_queues(nn->netdev);
- enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+ enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nfp_net_read_link_status(nn);
}
NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
if (err)
goto err_free_exn;
- disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
GFP_KERNEL);
{
unsigned int r;
- disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
netif_carrier_off(nn->netdev);
nn->link_up = false;
struct dcbx_app_priority_entry *p_tbl,
u32 pri_tc_tbl, int count, bool dcbx_enabled)
{
- u8 tc, priority, priority_map;
+ u8 tc, priority_map;
enum dcbx_protocol_type type;
u16 protocol_id;
+ int priority;
bool enable;
int i;
* indication, but we only got here if there was an
* app tlv for the protocol, so dcbx must be enabled.
*/
- enable = !!(type == DCBX_PROTOCOL_ETH);
+ enable = !(type == DCBX_PROTOCOL_ETH);
qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
priority, tc, type);
}
}
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
{
u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port;
u16 num_pqs, multi_cos_tcs = 1;
+ u8 pf_wfq = qm_info->pf_wfq;
+ u32 pf_rl = qm_info->pf_rl;
u16 num_vfs = 0;
#ifdef CONFIG_QED_SRIOV
/* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
*/
- qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
- num_pqs, GFP_KERNEL);
+ qm_info->qm_pq_params = kcalloc(num_pqs,
+ sizeof(struct init_qm_pq_params),
+ b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
if (!qm_info->qm_pq_params)
goto alloc_err;
- qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
- num_vports, GFP_KERNEL);
+ qm_info->qm_vport_params = kcalloc(num_vports,
+ sizeof(struct init_qm_vport_params),
+ b_sleepable ? GFP_KERNEL
+ : GFP_ATOMIC);
if (!qm_info->qm_vport_params)
goto alloc_err;
- qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
- MAX_NUM_PORTS, GFP_KERNEL);
+ qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
+ sizeof(struct init_qm_port_params),
+ b_sleepable ? GFP_KERNEL
+ : GFP_ATOMIC);
if (!qm_info->qm_port_params)
goto alloc_err;
- qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
- GFP_KERNEL);
+ qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
+ b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
if (!qm_info->wfq_data)
goto alloc_err;
for (i = 0; i < qm_info->num_vports; i++)
qm_info->qm_vport_params[i].vport_wfq = 1;
- qm_info->pf_wfq = 0;
- qm_info->pf_rl = 0;
qm_info->vport_rl_en = 1;
qm_info->vport_wfq_en = 1;
+ qm_info->pf_rl = pf_rl;
+ qm_info->pf_wfq = pf_wfq;
return 0;
qed_qm_info_free(p_hwfn);
/* initialize qed's qm data structure */
- rc = qed_init_qm_info(p_hwfn);
+ rc = qed_init_qm_info(p_hwfn, false);
if (rc)
return rc;
goto alloc_err;
/* Prepare and process QM requirements */
- rc = qed_init_qm_info(p_hwfn);
+ rc = qed_init_qm_info(p_hwfn, true);
if (rc)
goto alloc_err;
hw_mode |= 1 << MODE_ASIC;
+ if (p_hwfn->cdev->num_hwfns > 1)
+ hw_mode |= 1 << MODE_100G;
+
p_hwfn->hw_info.hw_mode = hw_mode;
+
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
+ "Configuring function for hw_mode: 0x%08x\n",
+ p_hwfn->hw_info.hw_mode);
}
/* Init run time data for all PFs on an engine. */
u32 load_code, param;
int rc, mfw_rc, i;
+ if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+ return -EINVAL;
+ }
+
if (IS_PF(cdev)) {
rc = qed_init_fw_data(cdev, bin_fw_data);
if (rc != 0)
{
int i;
+ if (cdev->num_hwfns > 1) {
+ DP_VERBOSE(cdev,
+ NETIF_MSG_LINK,
+ "WFQ configuration is not supported for this device\n");
+ return;
+ }
+
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
#define MEDIA_DA_TWINAX 0x3
#define MEDIA_BASE_T 0x4
#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_MODULE_FIBER 0x6
#define MEDIA_KR 0xf0
#define MEDIA_NOT_PRESENT 0xff
p_ramrod->mtu = cpu_to_le16(p_params->mtu);
p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+ p_ramrod->untagged = p_params->only_untagged;
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
- SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
- (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
- !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
-
SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
start.vport_id, start.mtu);
}
- qed_reset_vport_stats(cdev);
+ if (params->clear_stats)
+ qed_reset_vport_stats(cdev);
return 0;
}
/* Fallthrough */
case QED_INT_MODE_MSI:
- rc = pci_enable_msi(cdev->pdev);
- if (!rc) {
- int_params->out.int_mode = QED_INT_MODE_MSI;
- goto out;
- }
+ if (cdev->num_hwfns == 1) {
+ rc = pci_enable_msi(cdev->pdev);
+ if (!rc) {
+ int_params->out.int_mode = QED_INT_MODE_MSI;
+ goto out;
+ }
- DP_NOTICE(cdev, "Failed to enable MSI\n");
- if (force_mode)
- goto out;
+ DP_NOTICE(cdev, "Failed to enable MSI\n");
+ if (force_mode)
+ goto out;
+ }
/* Fallthrough */
case QED_INT_MODE_INTA:
case MEDIA_SFPP_10G_FIBER:
case MEDIA_SFP_1G_FIBER:
case MEDIA_XFP_FIBER:
+ case MEDIA_MODULE_FIBER:
case MEDIA_KR:
port_type = PORT_FIBRE;
break;
return port_type;
}
+static int qed_get_link_data(struct qed_hwfn *hwfn,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *link_caps)
+{
+ void *p;
+
+ if (!IS_PF(hwfn->cdev)) {
+ qed_vf_get_link_params(hwfn, params);
+ qed_vf_get_link_state(hwfn, link);
+ qed_vf_get_link_caps(hwfn, link_caps);
+
+ return 0;
+ }
+
+ p = qed_mcp_get_link_params(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(params, p, sizeof(*params));
+
+ p = qed_mcp_get_link_state(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link, p, sizeof(*link));
+
+ p = qed_mcp_get_link_capabilities(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link_caps, p, sizeof(*link_caps));
+
+ return 0;
+}
+
static void qed_fill_link(struct qed_hwfn *hwfn,
struct qed_link_output *if_link)
{
memset(if_link, 0, sizeof(*if_link));
/* Prepare source inputs */
- if (IS_PF(hwfn->cdev)) {
- memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params));
- memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
- memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
- sizeof(link_caps));
- } else {
- qed_vf_get_link_params(hwfn, ¶ms);
- qed_vf_get_link_state(hwfn, &link);
- qed_vf_get_link_caps(hwfn, &link_caps);
+ if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) {
+ dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
+ return;
}
/* Set the link parameters to pass to protocol driver */
SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
DQ_XCM_CORE_SPQ_PROD_CMD);
db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
- /* validate producer is up to-date */
- rmb();
-
db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
- /* do not reorder */
- barrier();
+ /* make sure the SPQE is updated before the doorbell */
+ wmb();
DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
/* make sure doorbell is rang */
- mmiowb();
+ wmb();
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
*p_en2 = *p_ent;
- kfree(p_ent);
+ /* EBLOCK is responsible for freeing the allocated p_ent */
+ if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+ kfree(p_ent);
p_ent = p_en2;
}
* Thus, after receiving the answer, perform the cleanup here.
*/
rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ /* This is an allocated p_ent which does not need to be
+ * returned to the pool.
+ */
+ kfree(p_ent);
+ return rc;
+ }
+
if (rc)
goto spq_post_fail2;
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
- if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
- /* EBLOCK is responsible for freeing its own entry */
+ if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+ (found->queue == &p_spq->unlimited_pending))
+ /* EBLOCK is responsible for returning its own entry into the
+ * free list, unless it originally added the entry into the
+ * unlimited pending list.
+ */
qed_spq_return_entry(p_hwfn, found);
/* Attempt to post pending requests */
#include "qed_vf.h"
#define QED_VF_ARRAY_LENGTH (3)
+#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev) ((cdev)->b_is_vf)
#define IS_PF(cdev) (!((cdev)->b_is_vf))
-#ifdef CONFIG_QED_SRIOV
#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
#else
+#define IS_VF(cdev) (0)
+#define IS_PF(cdev) (1)
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
case ETH_SS_PRIV_FLAGS:
return QEDE_PRI_FLAG_LEN;
case ETH_SS_TEST:
- return QEDE_ETHTOOL_TEST_MAX;
+ if (!IS_VF(edev))
+ return QEDE_ETHTOOL_TEST_MAX;
+ else
+ return 0;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
{ 0 }
};
{
struct qede_dev *edev = netdev_priv(dev);
- return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
+ return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
max_tx_rate);
}
edev->accept_any_vlan = false;
}
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ netdev_features_t changes = features ^ dev->features;
+ bool need_reload = false;
+
+ /* No action needed if hardware GRO is disabled during driver load */
+ if (changes & NETIF_F_GRO) {
+ if (dev->features & NETIF_F_GRO)
+ need_reload = !edev->gro_disable;
+ else
+ need_reload = edev->gro_disable;
+ }
+
+ if (need_reload && netif_running(edev->ndev)) {
+ dev->features = features;
+ qede_reload(edev, NULL, NULL);
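+ /* dev->features was already updated above; returning 1 tells
+ * the core not to set it again.
+ */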
+ return 1;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_QEDE_VXLAN
static void qede_add_vxlan_port(struct net_device *dev,
sa_family_t sa_family, __be16 port)
#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_link_state = qede_set_vf_link_state,
return rc;
}
-static int qede_start_queues(struct qede_dev *edev)
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
int rc, tc, i;
int vlan_removal_en = 1;
enum qede_load_mode {
QEDE_LOAD_NORMAL,
+ QEDE_LOAD_RELOAD,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
goto err3;
DP_INFO(edev, "Setup IRQs succeeded\n");
- rc = qede_start_queues(edev);
+ rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
if (rc)
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
if (func)
func(edev, args);
- qede_load(edev, QEDE_LOAD_NORMAL);
+ qede_load(edev, QEDE_LOAD_RELOAD);
mutex_lock(&edev->qede_lock);
qede_config_rx_mode(edev->ndev);
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
+ /* Ensure writes are complete before HW fetches Tx descriptors */
+ wmb();
qlcnic_update_cmd_producer(tx_ring);
return NETDEV_TX_OK;
if (!opcode)
return;
- ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+ ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
desc = &sds_ring->desc_head[consumer];
desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
}
/* Disabling the timer */
- del_timer_sync(&qdev->timer);
ql_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
netif_device_detach(ndev);
+ del_timer_sync(&qdev->timer);
if (netif_running(ndev))
ql_eeh_close(ndev);
pci_disable_device(pdev);
case pci_channel_io_perm_failure:
dev_err(&pdev->dev,
"%s: pci_channel_io_perm_failure.\n", __func__);
+ del_timer_sync(&qdev->timer);
ql_eeh_close(ndev);
set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
return rc;
}
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ /* All our existing PIO buffers went away */
+ efx_for_each_channel(channel, efx)
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ tx_queue->piobuf = NULL;
+}
+
#else /* !EFX_USE_PIO */
static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
}
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+}
+
#endif /* EFX_USE_PIO */
static void efx_ef10_remove(struct efx_nic *efx)
nic_data->must_realloc_vis = true;
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
+ efx_ef10_forget_old_piobufs(efx);
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
#ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) {
- efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
- sizeof(*efx->rps_flow_id),
- GFP_KERNEL);
- if (!efx->rps_flow_id) {
+ struct efx_channel *channel;
+ int i, success = 1;
+
+ efx_for_each_channel(channel, efx) {
+ channel->rps_flow_id =
+ kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*channel->rps_flow_id),
+ GFP_KERNEL);
+ if (!channel->rps_flow_id)
+ success = 0;
+ else
+ for (i = 0;
+ i < efx->type->max_rx_ip_filters;
+ ++i)
+ channel->rps_flow_id[i] =
+ RPS_FLOW_ID_INVALID;
+ }
+
+ if (!success) {
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
efx->type->filter_table_remove(efx);
rc = -ENOMEM;
goto out_unlock;
}
+
+ efx->rps_expire_index = efx->rps_expire_channel = 0;
}
#endif
out_unlock:
static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
- kfree(efx->rps_flow_id);
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
#endif
down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
const struct efx_farch_register_test *regs,
size_t n_regs)
{
- unsigned address = 0, i, j;
+ unsigned address = 0;
+ int i, j;
efx_oword_t mask, imask, original, reg, buf;
for (i = 0; i < n_regs; ++i) {
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
- result |= SUPPORTED_FIBRE;
- break;
-
case MC_CMD_MEDIA_QSFP_PLUS:
result |= SUPPORTED_FIBRE;
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ result |= SUPPORTED_1000baseT_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ result |= SUPPORTED_10000baseT_Full;
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
result |= SUPPORTED_40000baseCR4_Full;
break;
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ * indexed by filter ID
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
unsigned int rfs_filters_added;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+ u32 *rps_flow_id;
#endif
unsigned n_rx_tobe_disc;
* @filter_sem: Filter table rw_semaphore, for freeing the table
* @filter_lock: Filter table lock, for mere content changes
* @filter_state: Architecture-dependent filter table state
- * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
- * indexed by filter ID
- * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @rps_expire_channel: Next channel to check for expiry
+ * @rps_expire_index: Next index to check for expiry in
+ * @rps_expire_channel's @rps_flow_id
* @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.
spinlock_t filter_lock;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
- u32 *rps_flow_id;
+ unsigned int rps_expire_channel;
unsigned int rps_expire_index;
#endif
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_filter_spec spec;
- const __be16 *ports;
- __be16 ether_type;
- int nhoff;
+ struct flow_keys fk;
int rc;
- /* The core RPS/RFS code has already parsed and validated
- * VLAN, IP and transport headers. We assume they are in the
- * header area.
- */
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
- const struct vlan_hdr *vh =
- (const struct vlan_hdr *)skb->data;
+ if (flow_id == RPS_FLOW_ID_INVALID)
+ return -EINVAL;
- /* We can't filter on the IP 5-tuple and the vlan
- * together, so just strip the vlan header and filter
- * on the IP part.
- */
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
- ether_type = vh->h_vlan_encapsulated_proto;
- nhoff = sizeof(struct vlan_hdr);
- } else {
- ether_type = skb->protocol;
- nhoff = 0;
- }
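+ /* Use the core flow dissector instead of parsing the headers by
+ * hand; it strips any VLAN tag and extracts the 5-tuple for us.
+ */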
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
- if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
+ if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+ if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
return -EPROTONOSUPPORT;
efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
- spec.ether_type = ether_type;
-
- if (ether_type == htons(ETH_P_IP)) {
- const struct iphdr *ip =
- (const struct iphdr *)(skb->data + nhoff);
-
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
- if (ip_is_fragment(ip))
- return -EPROTONOSUPPORT;
- spec.ip_proto = ip->protocol;
- spec.rem_host[0] = ip->saddr;
- spec.loc_host[0] = ip->daddr;
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
- ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+ spec.ether_type = fk.basic.n_proto;
+ spec.ip_proto = fk.basic.ip_proto;
+
+ if (fk.basic.n_proto == htons(ETH_P_IP)) {
+ spec.rem_host[0] = fk.addrs.v4addrs.src;
+ spec.loc_host[0] = fk.addrs.v4addrs.dst;
} else {
- const struct ipv6hdr *ip6 =
- (const struct ipv6hdr *)(skb->data + nhoff);
-
- EFX_BUG_ON_PARANOID(skb_headlen(skb) <
- nhoff + sizeof(*ip6) + 4);
- spec.ip_proto = ip6->nexthdr;
- memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
- memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
- ports = (const __be16 *)(ip6 + 1);
+ memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
+ memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
}
- spec.rem_port = ports[0];
- spec.loc_port = ports[1];
+ spec.rem_port = fk.ports.src;
+ spec.loc_port = fk.ports.dst;
rc = efx->type->filter_rfs_insert(efx, &spec);
if (rc < 0)
return rc;
/* Remember this so we can check whether to expire the filter later */
- efx->rps_flow_id[rc] = flow_id;
- channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ channel = efx_get_channel(efx, rxq_index);
+ channel->rps_flow_id[rc] = flow_id;
++channel->rfs_filters_added;
- if (ether_type == htons(ETH_P_IP))
+ if (spec.ether_type == htons(ETH_P_IP))
netif_info(efx, rx_status, efx->net_dev,
"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(ports[0]), spec.loc_host,
- ntohs(ports[1]), rxq_index, flow_id, rc);
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
else
netif_info(efx, rx_status, efx->net_dev,
"steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(ports[0]), spec.loc_host,
- ntohs(ports[1]), rxq_index, flow_id, rc);
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
return rc;
}
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
- unsigned int index, size;
+ unsigned int channel_idx, index, size;
u32 flow_id;
if (!spin_trylock_bh(&efx->filter_lock))
return false;
expire_one = efx->type->filter_rfs_expire_one;
+ channel_idx = efx->rps_expire_channel;
index = efx->rps_expire_index;
size = efx->type->max_rx_ip_filters;
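+ /* Scan up to @quota entries, walking all channels round-robin and
+ * remembering where to resume on the next call.
+ */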
while (quota--) {
- flow_id = efx->rps_flow_id[index];
- if (expire_one(efx, flow_id, index))
+ struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+ flow_id = channel->rps_flow_id[index];
+
+ if (flow_id != RPS_FLOW_ID_INVALID &&
+ expire_one(efx, flow_id, index)) {
netif_info(efx, rx_status, efx->net_dev,
- "expired filter %d [flow %u]\n",
- index, flow_id);
- if (++index == size)
+ "expired filter %d [queue %u flow %u]\n",
+ index, channel_idx, flow_id);
+ channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+ }
+ if (++index == size) {
+ if (++channel_idx == efx->n_channels)
+ channel_idx = 0;
index = 0;
+ }
}
+ efx->rps_expire_channel = channel_idx;
efx->rps_expire_index = index;
spin_unlock_bh(&efx->filter_lock);
struct phy_device *phy_dev;
struct mii_bus *mii_bus;
- int phy_irq[PHY_MAX_ADDR];
unsigned int using_extphy;
int last_duplex;
int last_carrier;
pdata->mii_bus->priv = pdata;
pdata->mii_bus->read = smsc911x_mii_read;
pdata->mii_bus->write = smsc911x_mii_write;
- memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->mii_bus));
pdata->mii_bus->parent = &pdev->dev;
struct netdev_hw_addr *ha;
netdev_for_each_uc_addr(ha, dev) {
- dwmac4_set_umac_addr(ioaddr, ha->addr, reg);
+ dwmac4_set_umac_addr(hw, ha->addr, reg);
reg++;
}
}
priv->tx_path_in_lpi_mode = true;
if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
- if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+ if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
priv->rx_tail_addr,
STMMAC_CHAN0);
if (!netif_running(ndev))
return 0;
- spin_lock_irqsave(&priv->lock, flags);
-
/* Power Down bit, into the PM register, is cleared
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
* from another devices (e.g. serial console).
*/
if (device_may_wakeup(priv->device)) {
+ spin_lock_irqsave(&priv->lock, flags);
priv->hw->mac->pmt(priv->hw, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
netif_device_attach(ndev);
+ spin_lock_irqsave(&priv->lock, flags);
+
priv->cur_rx = 0;
priv->dirty_rx = 0;
priv->dirty_tx = 0;
return -ENOMEM;
if (mdio_bus_data->irqs)
- memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq));
+ memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq));
#ifdef CONFIG_OF
if (priv->device->of_node)
if (priv->coal_intvl != 0) {
struct ethtool_coalesce coal;
- coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+ coal.rx_coalesce_usecs = priv->coal_intvl;
cpsw_set_coalesce(ndev, &coal);
}
clean_ale_ret:
cpsw_ale_destroy(priv->ale);
clean_dma_ret:
- cpdma_chan_destroy(priv->txch);
- cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
unregister_netdev(ndev);
cpsw_ale_destroy(priv->ale);
- cpdma_chan_destroy(priv->txch);
- cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
pm_runtime_disable(&pdev->dev);
device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
struct mpipe_data *md = &mpipe_data[instance];
struct skb_shared_hwtstamps shhwtstamps;
- struct timespec ts;
+ struct timespec64 ts;
shtx->tx_flags |= SKBTX_IN_PROGRESS;
gxio_mpipe_get_timestamp(&md->context, &ts);
/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
{
- struct timespec ts;
+ struct timespec64 ts;
- getnstimeofday(&ts);
+ ktime_get_ts64(&ts);
gxio_mpipe_set_timestamp(&md->context, &ts);
mutex_init(&md->ptp_lock);
# projects. To keep the source common for all those drivers (and
# thus simplify fixes to it), please do not clean it up!
-ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
+ccflags-y := -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
dev->stats.collisions++;
else if (err == -ENETUNREACH)
dev->stats.tx_carrier_errors++;
- else
- dev->stats.tx_errors++;
+
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
dev->stats.collisions++;
else if (err == -ENETUNREACH)
dev->stats.tx_carrier_errors++;
- else
- dev->stats.tx_errors++;
+
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
#endif
static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
+ struct geneve_dev *geneve = netdev_priv(dev);
/* The max_mtu calculation does not take account of GENEVE
* options, to avoid excluding potentially valid
* configurations.
*/
- int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
- - dev->hard_header_len;
+ int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
+
+ if (geneve->remote.sa.sa_family == AF_INET6)
+ max_mtu -= sizeof(struct ipv6hdr);
+ else
+ max_mtu -= sizeof(struct iphdr);
if (new_mtu < 68)
return -EINVAL;
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
+ LIST_HEAD(list_kill);
int err;
memset(tb, 0, sizeof(tb));
err = geneve_configure(net, dev, &geneve_remote_unspec,
0, 0, 0, 0, htons(dst_port), true,
GENEVE_F_UDP_ZERO_CSUM6_RX);
- if (err)
- goto err;
+ if (err) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
if (err)
goto err;
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0)
+ goto err;
+
return dev;
err:
- free_netdev(dev);
+ geneve_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
dev_put(dev);
}
+static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
+ unsigned char **iv,
+ struct scatterlist **sg)
+{
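+ /* Lay out the AEAD request, IV and scatterlist in a single
+ * allocation: the request (including the tfm's reqsize), then the
+ * IV, then the scatterlist at its natural alignment.
+ */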
+ size_t size, iv_offset, sg_offset;
+ struct aead_request *req;
+ void *tmp;
+
+ size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
+ iv_offset = size;
+ size += GCM_AES_IV_LEN;
+
+ size = ALIGN(size, __alignof__(struct scatterlist));
+ sg_offset = size;
+ size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+
+ tmp = kmalloc(size, GFP_ATOMIC);
+ if (!tmp)
+ return NULL;
+
+ *iv = (unsigned char *)(tmp + iv_offset);
+ *sg = (struct scatterlist *)(tmp + sg_offset);
+ req = tmp;
+
+ aead_request_set_tfm(req, tfm);
+
+ return req;
+}
+
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct net_device *dev)
{
int ret;
- struct scatterlist sg[MAX_SKB_FRAGS + 1];
- unsigned char iv[GCM_AES_IV_LEN];
+ struct scatterlist *sg;
+ unsigned char *iv;
struct ethhdr *eth;
struct macsec_eth_header *hh;
size_t unprotected_len;
macsec_fill_sectag(hh, secy, pn);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
- macsec_fill_iv(iv, secy->sci, pn);
-
skb_put(skb, secy->icv_len);
if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
return ERR_PTR(-EINVAL);
}
- req = aead_request_alloc(tx_sa->key.tfm, GFP_ATOMIC);
+ req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
if (!req) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
+ macsec_fill_iv(iv, secy->sci, pn);
+
sg_init_table(sg, MAX_SKB_FRAGS + 1);
skb_to_sgvec(skb, sg, 0, skb->len);
out:
macsec_rxsa_put(rx_sa);
dev_put(dev);
- return;
}
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
struct macsec_secy *secy)
{
int ret;
- struct scatterlist sg[MAX_SKB_FRAGS + 1];
- unsigned char iv[GCM_AES_IV_LEN];
+ struct scatterlist *sg;
+ unsigned char *iv;
struct aead_request *req;
struct macsec_eth_header *hdr;
u16 icv_len = secy->icv_len;
if (!skb)
return ERR_PTR(-ENOMEM);
- req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
+ req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
if (!req) {
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
struct crypto_aead *tfm;
int ret;
- tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
if (!tfm || IS_ERR(tfm))
return NULL;
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.OutPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
+ skb->dev = macsec->real_dev;
len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
genl_unregister_family(&macsec_fam);
rtnl_link_unregister(&macsec_link_ops);
unregister_netdevice_notifier(&macsec_notifier);
+ rcu_barrier();
}
module_init(macsec_init);
/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
- int ret;
- u16 val, delay;
+ int ret, val;
+ u16 delay;
if (!phydev->priv) {
dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
}
if (phy_interface_is_rgmii(phydev)) {
- ret = phy_write(phydev, MII_DP83867_PHYCTRL,
- (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
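+ /* Read-modify-write so that only the FIFO depth field changes. */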
+ val = phy_read(phydev, MII_DP83867_PHYCTRL);
+ if (val < 0)
+ return val;
+ val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
+ val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
if (ret)
return ret;
}
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/gpio.h>
+#include <linux/idr.h>
#define MII_REGS_NUM 29
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
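+/* Fixed PHY addresses come from an IDA so that addresses freed by
+ * fixed_phy_del() can be reused by later registrations.
+ */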
+static DEFINE_IDA(phy_fixed_ida);
+
static void fixed_phy_del(int phy_addr)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
if (gpio_is_valid(fp->link_gpio))
gpio_free(fp->link_gpio);
kfree(fp);
+ ida_simple_remove(&phy_fixed_ida, phy_addr);
return;
}
}
}
-static int phy_fixed_addr;
-static DEFINE_SPINLOCK(phy_fixed_addr_lock);
-
struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
int link_gpio,
return ERR_PTR(-EPROBE_DEFER);
/* Get the next available PHY address, up to PHY_MAX_ADDR */
- spin_lock(&phy_fixed_addr_lock);
- if (phy_fixed_addr == PHY_MAX_ADDR) {
- spin_unlock(&phy_fixed_addr_lock);
- return ERR_PTR(-ENOSPC);
- }
- phy_addr = phy_fixed_addr++;
- spin_unlock(&phy_fixed_addr_lock);
+ phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
+ if (phy_addr < 0)
+ return ERR_PTR(phy_addr);
ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
- if (ret < 0)
+ if (ret < 0) {
+ ida_simple_remove(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
+ }
phy = get_phy_device(fmb->mii_bus, phy_addr, false);
if (IS_ERR(phy)) {
list_del(&fp->node);
kfree(fp);
}
+ ida_destroy(&phy_fixed_ida);
}
module_exit(fixed_mdio_bus_exit);
return 0;
}
+static int m88e1111_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ /* The Marvell PHY has an erratum that requires
+ * certain registers to be written in order
+ * to restart autonegotiation.
+ */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+ err = marvell_set_polarity(phydev, phydev->mdix);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_M1111_PHY_LED_CONTROL,
+ MII_M1111_PHY_LED_DIRECT);
+ if (err < 0)
+ return err;
+
+ err = genphy_config_aneg(phydev);
+ if (err < 0)
+ return err;
+
+ if (phydev->autoneg != AUTONEG_ENABLE) {
+ int bmcr;
+
+ /* A write to speed/duplex bits (that is performed by
+ * genphy_config_aneg() call above) must be followed by
+ * a software reset. Otherwise, the write has no effect.
+ */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_OF_MDIO
/*
* Set and/or override some configuration registers based on the
if (err < 0)
return err;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
- phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
- phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
- phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
-
- err = genphy_config_aneg(phydev);
-
- return err;
+ return genphy_config_aneg(phydev);
}
static int m88e1318_config_aneg(struct phy_device *phydev)
return phy_write(phydev, MII_BMCR, BMCR_RESET);
}
+static int m88e1121_config_init(struct phy_device *phydev)
+{
+ int err, oldpage;
+
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+ if (err < 0)
+ return err;
+
+ /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+ err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
+ MII_88E1121_PHY_LED_DEF);
+ if (err < 0)
+ return err;
+
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+
+ /* Set marvell,reg-init configuration from device tree */
+ return marvell_config_init(phydev);
+}
+
static int m88e1510_config_init(struct phy_device *phydev)
{
int err;
return err;
}
- return marvell_config_init(phydev);
+ return m88e1121_config_init(phydev);
}
static int m88e1118_config_aneg(struct phy_device *phydev)
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
- .config_aneg = &marvell_config_aneg,
+ .config_aneg = &m88e1111_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_init = &marvell_config_init,
+ .config_init = &m88e1121_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_init = &marvell_config_init,
+ .config_init = &m88e1121_config_init,
.config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
* in all capable mode before using it.
*/
if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) {
- int timeout = 50000;
-
- /* set "all capable" mode and reset the phy */
+ /* set "all capable" mode */
rc |= MII_LAN83C185_MODE_ALL;
phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc);
- phy_write(phydev, MII_BMCR, BMCR_RESET);
-
- /* wait end of reset (max 500 ms) */
- do {
- udelay(10);
- if (timeout-- == 0)
- return -1;
- rc = phy_read(phydev, MII_BMCR);
- } while (rc & BMCR_RESET);
}
- return 0;
+
+ /* reset the phy */
+ return genphy_soft_reset(phydev);
}
static int lan911x_config_init(struct phy_device *phydev)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
- put_net(pch->chan_net);
- pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
*/
static void ppp_destroy_channel(struct channel *pch)
{
+ put_net(pch->chan_net);
+ pch->chan_net = NULL;
+
atomic_dec(&channel_count);
if (!pch->file.dead) {
#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
-static void __team_compute_features(struct team *team)
+static void ___team_compute_features(struct team *team)
{
struct team_port *port;
u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
+}
+static void __team_compute_features(struct team *team)
+{
+ ___team_compute_features(team);
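+ /* netdev_change_features() must be called without team->lock held. */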
netdev_change_features(team->dev);
}
static void team_compute_features(struct team *team)
{
mutex_lock(&team->lock);
- __team_compute_features(team);
+ ___team_compute_features(team);
mutex_unlock(&team->lock);
+ netdev_change_features(team->dev);
}
static int team_port_enter(struct team *team, struct team_port *port)
goto err_dev_open;
}
+ netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
err = vlan_vids_add_by_dev(port_dev, dev);
if (err) {
if (cdc_ncm_init(dev))
goto error2;
+ /* Some firmware versions need a pause here or they will silently
+ * fail to set up the interface properly. This value was decided
+ * empirically on a Sierra Wireless MC7455 running 02.08.02.00
+ * firmware.
+ */
+ usleep_range(10000, 20000);
+
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp) {
goto goon;
}
- if (!count || count < 4)
+ if (count < 4)
goto goon;
rx_status = buf[count - 2];
#include <linux/mdio.h>
#include <linux/usb/cdc.h>
#include <linux/suspend.h>
+#include <linux/acpi.h>
/* Information for net-next */
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "3"
+#define NET_VERSION "5"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define USB_TX_DMA 0xd434
#define USB_TOLERANCE 0xd490
#define USB_LPM_CTRL 0xd41a
+#define USB_BMU_RESET 0xd4b0
#define USB_UPS_CTRL 0xd800
#define USB_MISC_0 0xd81a
#define USB_POWER_CUT 0xd80a
#define TEST_MODE_DISABLE 0x00000001
#define TX_SIZE_ADJUST1 0x00000100
+/* USB_BMU_RESET */
+#define BMU_RESET_EP_IN 0x01
+#define BMU_RESET_EP_OUT 0x02
+
/* USB_UPS_CTRL */
#define POWER_CUT 0x0100
/* SRAM_IMPEDANCE */
#define RX_DRIVING_MASK 0x6000
+/* MAC PASSTHRU */
+#define AD_MASK 0xfee0
+#define EFUSE 0xcfdb
+#define PASS_THRU_MASK 0x1
+
enum rtl_register_content {
_1000bps = 0x10,
_100bps = 0x08,
int (*eee_get)(struct r8152 *, struct ethtool_eee *);
int (*eee_set)(struct r8152 *, struct ethtool_eee *);
bool (*in_nway)(struct r8152 *);
+ void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;
int intr_interval;
return ret;
}
+/* Devices containing RTL8153-AD can support a persistent,
+ * host-system-provided MAC address.
+ * Examples of this are the Dell TB15 and Dell WD15 docks.
+ */
+static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ int ret = -EINVAL;
+ u32 ocp_data;
+ unsigned char buf[6];
+
+ /* test for -AD variant of RTL8153 */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ if ((ocp_data & AD_MASK) != 0x1000)
+ return -ENODEV;
+
+ /* test for MAC address pass-through bit */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+ if ((ocp_data & PASS_THRU_MASK) != 1)
+ return -ENODEV;
+
+ /* returns _AUXMAC_#AABBCCDDEEFF# */
+ status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
+ obj = (union acpi_object *)buffer.pointer;
+ if (!ACPI_SUCCESS(status))
+ return -ENODEV;
+ if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid buffer when reading pass-thru MAC addr: "
+ "(%d, %d)\n",
+ obj->type, obj->string.length);
+ goto amacout;
+ }
+ if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
+ strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid header when reading pass-thru MAC addr\n");
+ goto amacout;
+ }
+ ret = hex2bin(buf, obj->string.pointer + 9, 6);
+ if (!(ret == 0 && is_valid_ether_addr(buf))) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid MAC when reading pass-thru MAC addr: "
+ "%d, %pM\n", ret, buf);
+ ret = -EINVAL;
+ goto amacout;
+ }
+ memcpy(sa->sa_data, buf, 6);
+ ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
+ netif_info(tp, probe, tp->netdev,
+ "Using pass-thru MAC addr %pM\n", sa->sa_data);
+
+amacout:
+ kfree(obj);
+ return ret;
+}
+
static int set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
if (tp->version == RTL_VER_01)
ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
- else
- ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+ else {
+ /* If this is not an RTL8153-AD, the eFuse MAC pass-through bit
+ * is not set, or the system doesn't provide a valid _SB.AMAC,
+ * this is expected to be non-zero.
+ */
+ ret = vendor_mac_passthru_addr_read(tp, &sa);
+ if (ret < 0)
+ ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+ }
if (ret < 0) {
netif_err(tp, probe, dev, "Get ether addr fail\n");
static void r8153_set_rx_early_size(struct r8152 *tp)
{
u32 mtu = tp->netdev->mtu;
- u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4;
+ u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
}
u32 ocp_data;
u32 wolopts = 0;
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
- if (!(ocp_data & LAN_WAKE_EN))
- return 0;
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
if (ocp_data & LINK_ON_WAKE_EN)
wolopts |= WAKE_PHY;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
- ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+ ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN);
if (wolopts & WAKE_UCAST)
ocp_data |= UWF_EN;
if (wolopts & WAKE_BCAST)
ocp_data |= BWF_EN;
if (wolopts & WAKE_MCAST)
ocp_data |= MWF_EN;
- if (wolopts & WAKE_ANY)
- ocp_data |= LAN_WAKE_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
if (enable) {
u32 ocp_data;
- r8153_u1u2en(tp, false);
- r8153_u2p3en(tp, false);
-
__rtl_set_wol(tp, WAKE_ANY);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
} else {
+ u32 ocp_data;
+
__rtl_set_wol(tp, tp->saved_wolopts);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data &= ~LINK_OFF_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+ }
+}
+
+static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
+{
+ rtl_runtime_suspend_enable(tp, enable);
+
+ if (enable) {
+ r8153_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+ } else {
r8153_u2p3en(tp, true);
r8153_u1u2en(tp, true);
}
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
}
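+/* Reset the USB buffer-management unit by pulsing the EP-in/EP-out
+ * reset bits: clear them, then set them again.
+ */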
+static void rtl_reset_bmu(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET);
+ ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+ ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+}
+
static void r8152_aldps_en(struct r8152 *tp, bool enable)
{
if (enable) {
r8153_hw_phy_cfg(tp);
rtl8152_nic_reset(tp);
+ rtl_reset_bmu(tp);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
rtl_disable(tp);
+ rtl_reset_bmu(tp);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
{
r8153_aldps_en(tp, false);
rtl_disable(tp);
+ rtl_reset_bmu(tp);
r8153_aldps_en(tp, true);
usb_enable_lpm(tp->udev);
}
r8153_power_cut_en(tp, false);
r8153_u1u2en(tp, true);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
- PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
- U1U2_SPDWN_EN | L1_SPDWN_EN);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
- PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
- TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
- EEE_SPDWN_EN);
+ /* MAC clock speed down */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
r8153_enable_eee(tp);
r8153_aldps_en(tp, true);
napi_disable(&tp->napi);
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_stop_rx(tp);
- rtl_runtime_suspend_enable(tp, true);
+ tp->rtl_ops.autosuspend_en(tp, true);
} else {
cancel_delayed_work_sync(&tp->schedule);
tp->rtl_ops.down(tp);
if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- rtl_runtime_suspend_enable(tp, false);
+ tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
} else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
if (tp->netdev->flags & IFF_UP)
- rtl_runtime_suspend_enable(tp, false);
+ tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
}
ops->eee_get = r8152_get_eee;
ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8152_in_nway;
+ ops->autosuspend_en = rtl_runtime_suspend_enable;
break;
case RTL_VER_03:
ops->eee_get = r8153_get_eee;
ops->eee_set = r8153_set_eee;
ops->in_nway = rtl8153_in_nway;
+ ops->autosuspend_en = rtl8153_runtime_enable;
break;
default:
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
+#define CARRIER_CHECK_DELAY (2 * HZ)
+
struct smsc95xx_priv {
u32 mac_cr;
u32 hash_hi;
spinlock_t mac_cr_lock;
u8 features;
u8 suspend_flags;
+ bool link_ok;
+ struct delayed_work carrier_check;
+ struct usbnet *dev;
};
static bool turbo_mode = true;
intdata);
}
+static void set_carrier(struct usbnet *dev, bool link)
+{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+ if (pdata->link_ok == link)
+ return;
+
+ pdata->link_ok = link;
+
+ if (link)
+ usbnet_link_change(dev, 1, 0);
+ else
+ usbnet_link_change(dev, 0, 0);
+}
+
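+/* Poll the PHY's BMSR at CARRIER_CHECK_DELAY intervals and propagate
+ * link changes via set_carrier(); polling is not rescheduled while
+ * the device is suspended.
+ */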
+static void check_carrier(struct work_struct *work)
+{
+ struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
+ carrier_check.work);
+ struct usbnet *dev = pdata->dev;
+ int ret;
+
+ if (pdata->suspend_flags != 0)
+ return;
+
+ ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read MII_BMSR\n");
+ return;
+ }
+ if (ret & BMSR_LSTATUS)
+ set_carrier(dev, 1);
+ else
+ set_carrier(dev, 0);
+
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+}
+
/* Enable or disable Tx & Rx checksum offload engines */
static int smsc95xx_set_features(struct net_device *netdev,
netdev_features_t features)
dev->net->flags |= IFF_MULTICAST;
dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ pdata->dev = dev;
+ INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+
return 0;
}
static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
if (pdata) {
+ cancel_delayed_work(&pdata->carrier_check);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
/* do this first to ensure it's cleared even in error case */
pdata->suspend_flags = 0;
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
dev->hard_mtu = net->mtu + net->hard_header_len;
if (dev->rx_urb_size == old_hard_mtu) {
dev->rx_urb_size = dev->hard_mtu;
- if (dev->rx_urb_size > old_rx_urb_size)
+ if (dev->rx_urb_size > old_rx_urb_size) {
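+ /* Pause RX processing while the URBs are unlinked so the
+ * queue is rebuilt with the new, larger URB size.
+ */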
+ usbnet_pause_rx(dev);
usbnet_unlink_rx_urbs(dev);
+ usbnet_resume_rx(dev);
+ }
}
/* max qlen depend on hard_mtu and rx_urb_size */
} else if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
netif_carrier_ok(dev->net) &&
- !timer_pending (&dev->delay) &&
- !test_bit (EVENT_RX_HALT, &dev->flags)) {
+ !timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags)) {
int temp = dev->rxq.qlen;
if (temp < RX_QLEN(dev)) {
virtio_device_ready(vdev);
- /* Last of all, set up some receive buffers. */
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
-
- /* If we didn't even get one input buffer, we're useless. */
- if (vi->rq[i].vq->num_free ==
- virtqueue_get_vring_size(vi->rq[i].vq)) {
- free_unused_bufs(vi);
- err = -ENOMEM;
- goto free_recv_bufs;
- }
- }
-
vi->nb.notifier_call = &virtnet_cpu_callback;
err = register_hotcpu_notifier(&vi->nb);
if (err) {
pr_debug("virtio_net: registering cpu notifier failed\n");
- goto free_recv_bufs;
+ goto free_unregister_netdev;
}
/* Assume link up if device can't report link status,
return 0;
-free_recv_bufs:
+free_unregister_netdev:
vi->vdev->config->reset(vdev);
- free_receive_bufs(vi);
unregister_netdev(dev);
free_vqs:
cancel_delayed_work_sync(&vi->refill);
rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
segCnt = rcdlro->segCnt;
- BUG_ON(segCnt <= 1);
+ WARN_ON_ONCE(segCnt == 0);
mss = rcdlro->mss;
if (unlikely(segCnt <= 1))
segCnt = 0;
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040800
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
dst_hold(&rt6->dst);
rt6->rt6i_table = rt6i_table;
- rt6->dst.output = vrf_output6;
+ rt6->dst.output = vrf_output6;
rcu_assign_pointer(vrf->rt6, rt6);
rc = 0;
if (!rth)
return -ENOMEM;
- rth->dst.output = vrf_output;
+ rth->dst.output = vrf_output;
rth->rt_table_id = vrf->tb_id;
rcu_assign_pointer(vrf->rth, rth);
return 0;
}
-struct net_device *vxlan_dev_create(struct net *net, const char *name,
- u8 name_assign_type, struct vxlan_config *conf)
-{
- struct nlattr *tb[IFLA_MAX+1];
- struct net_device *dev;
- int err;
-
- memset(&tb, 0, sizeof(tb));
-
- dev = rtnl_create_link(net, name, name_assign_type,
- &vxlan_link_ops, tb);
- if (IS_ERR(dev))
- return dev;
-
- err = vxlan_dev_configure(net, dev, conf);
- if (err < 0) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
-
- return dev;
-}
-EXPORT_SYMBOL_GPL(vxlan_dev_create);
-
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
+ if (tb[IFLA_MTU])
+ conf.mtu = nla_get_u32(tb[IFLA_MTU]);
+
err = vxlan_dev_configure(src_net, dev, &conf);
switch (err) {
case -ENODEV:
.get_link_net = vxlan_get_link_net,
};
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+ u8 name_assign_type,
+ struct vxlan_config *conf)
+{
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net_device *dev;
+ int err;
+
+ memset(&tb, 0, sizeof(tb));
+
+ dev = rtnl_create_link(net, name, name_assign_type,
+ &vxlan_link_ops, tb);
+ if (IS_ERR(dev))
+ return dev;
+
+ err = vxlan_dev_configure(net, dev, conf);
+ if (err < 0) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
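+ /* Complete the netlink-visible setup of this out-of-band created
+ * device (sends the initial RTM_NEWLINK notification).
+ */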
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0) {
+ LIST_HEAD(list_kill);
+
+ vxlan_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ return ERR_PTR(err);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
+
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
struct net_device *dev)
{
}
ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
- ar->running_fw->fw_file.fw_features,
+ fw_file->fw_features,
sizeof(fw_file->fw_features));
break;
case ATH10K_FW_IE_FW_IMAGE:
return;
}
}
- ath10k_htt_rx_msdu_buff_replenish(htt);
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
peer = ath10k_peer_find(ar, vdev_id, addr);
if (!peer) {
+ spin_unlock_bh(&ar->data_lock);
ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
addr, vdev_id);
ath10k_wmi_peer_delete(ar, vdev_id, addr);
- spin_unlock_bh(&ar->data_lock);
return -ENOENT;
}
#define AR9300_NUM_GPIO 16
#define AR9330_NUM_GPIO 16
#define AR9340_NUM_GPIO 23
-#define AR9462_NUM_GPIO 10
+#define AR9462_NUM_GPIO 14
#define AR9485_NUM_GPIO 12
#define AR9531_NUM_GPIO 18
#define AR9550_NUM_GPIO 24
#define AR9561_NUM_GPIO 23
-#define AR9565_NUM_GPIO 12
+#define AR9565_NUM_GPIO 14
#define AR9580_NUM_GPIO 16
#define AR7010_NUM_GPIO 16
#define AR9300_GPIO_MASK 0x0000F4FF
#define AR9330_GPIO_MASK 0x0000F4FF
#define AR9340_GPIO_MASK 0x0000000F
-#define AR9462_GPIO_MASK 0x000003FF
+#define AR9462_GPIO_MASK 0x00003FFF
#define AR9485_GPIO_MASK 0x00000FFF
#define AR9531_GPIO_MASK 0x0000000F
#define AR9550_GPIO_MASK 0x0000000F
#define AR9561_GPIO_MASK 0x0000000F
-#define AR9565_GPIO_MASK 0x00000FFF
+#define AR9565_GPIO_MASK 0x00003FFF
#define AR9580_GPIO_MASK 0x0000F4FF
#define AR7010_GPIO_MASK 0x0000FFFF
const u8 *mac, struct station_info *sinfo)
{
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_scb_val_le scb_val;
s32 err = 0;
struct brcmf_sta_info_le sta_info_le;
u32 sta_flags;
u32 is_tdls_peer;
s32 total_rssi;
s32 count_rssi;
+ int rssi;
u32 i;
brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
total_rssi /= count_rssi;
sinfo->signal = total_rssi;
+ } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state)) {
+ memset(&scb_val, 0, sizeof(scb_val));
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
+ &scb_val, sizeof(scb_val));
+ if (err) {
+ brcmf_err("Could not get rssi (%d)\n", err);
+ goto done;
+ } else {
+ rssi = le32_to_cpu(scb_val.val);
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->signal = rssi;
+ brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
+ }
}
}
done:
brcmu_pkt_buf_free_skb(skb);
return;
}
+
+ skb->protocol = eth_type_trans(skb, ifp->ndev);
brcmf_netif_rx(ifp, skb);
}
if (idx != 0)
return -ENOENT;
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return -ENOENT;
mutex_lock(&mvm->mutex);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;
/* if beacon filtering isn't on mac80211 does it anyway */
struct iwl_rx_mpdu_desc *desc)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_sta *mvm_sta;
struct iwl_mvm_baid_data *baid_data;
struct iwl_mvm_reorder_buffer *buffer;
struct sk_buff *tail;
if (WARN_ON(IS_ERR_OR_NULL(sta)))
return false;
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
/* not a data packet */
if (!ieee80211_is_data_qos(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1))
return -EIO;
}
-#define SCAN_TIMEOUT (16 * HZ)
+#define SCAN_TIMEOUT (20 * HZ)
void iwl_mvm_scan_timeout(unsigned long data)
{
mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
u8 sta_id = mvmvif->ap_sta_id;
+ sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
/*
* It is possible that the 'sta' parameter is NULL,
* for example when a GTK is removed - the sta_id will then
* be the AP ID, and no station was passed by mac80211.
*/
- return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
}
return NULL;
struct ieee80211_key_seq seq;
const u8 *pn;
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+ break;
+ default:
+ return -EINVAL;
+ }
+
memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
pn = seq.aes_cmac.pn;
if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
!info->attrs[HWSIM_ATTR_FLAGS] ||
!info->attrs[HWSIM_ATTR_COOKIE] ||
+ !info->attrs[HWSIM_ATTR_SIGNAL] ||
!info->attrs[HWSIM_ATTR_TX_INFO])
goto out;
for (i = 0; i < retry; i++) {
path_b_ok = rtl8192eu_rx_iqk_path_b(priv);
- if (path_a_ok == 0x03) {
+ if (path_b_ok == 0x03) {
val32 = rtl8xxxu_read32(priv,
REG_RX_POWER_BEFORE_IQK_B_2);
result[t][6] = (val32 >> 16) & 0x3ff;
void rtl_addr_delay(u32 addr)
{
if (addr == 0xfe)
- msleep(50);
+ mdelay(50);
else if (addr == 0xfd)
msleep(5);
else if (addr == 0xfc)
rtl_addr_delay(addr);
} else {
rtl_set_rfreg(hw, rfpath, addr, mask, data);
- usleep_range(1, 2);
+ udelay(1);
}
}
EXPORT_SYMBOL(rtl_rfreg_delay);
rtl_addr_delay(addr);
} else {
rtl_set_bbreg(hw, addr, MASKDWORD, data);
- usleep_range(1, 2);
+ udelay(1);
}
}
EXPORT_SYMBOL(rtl_bb_delay);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
+ unsigned long align;
+ enum nd_pfn_mode mode;
struct nd_namespace_io *nsio;
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
struct nd_namespace_common *ndns = nd_pfn->ndns;
return -ENXIO;
}
+ align = le32_to_cpu(pfn_sb->align);
+ offset = le64_to_cpu(pfn_sb->dataoff);
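+ /* An align of zero is assumed to come from an older superblock;
+ * infer the alignment from the data offset.
+ */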
+ if (align == 0)
+ align = 1UL << ilog2(offset);
+ mode = le32_to_cpu(pfn_sb->mode);
+
if (!nd_pfn->uuid) {
- /* from probe we allocate */
+ /*
+ * When probing a namespace via nd_pfn_probe() the uuid
+ * is NULL (see: nd_pfn_devinit()), so initialize the
+ * settings from the pfn_sb.
+ */
nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
if (!nd_pfn->uuid)
return -ENOMEM;
+ nd_pfn->align = align;
+ nd_pfn->mode = mode;
} else {
- /* from init we validate */
+ /*
+ * When probing a pfn / dax instance we validate the
+ * live settings against the pfn_sb
+ */
if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
return -ENODEV;
+
+ /*
+ * If the uuid validates, but other settings mismatch
+ * return EINVAL because userspace has managed to change
+ * the configuration without specifying new
+ * identification.
+ */
+ if (nd_pfn->align != align || nd_pfn->mode != mode) {
+ dev_err(&nd_pfn->dev,
+ "init failed, settings mismatch\n");
+ dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
+ nd_pfn->align, align, nd_pfn->mode,
+ mode);
+ return -EINVAL;
+ }
}
- if (nd_pfn->align == 0)
- nd_pfn->align = le32_to_cpu(pfn_sb->align);
- if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
+ if (align > nvdimm_namespace_capacity(ndns)) {
dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
- nd_pfn->align, nvdimm_namespace_capacity(ndns));
+ align, nvdimm_namespace_capacity(ndns));
return -EINVAL;
}
* namespace has changed since the pfn superblock was
* established.
*/
- offset = le64_to_cpu(pfn_sb->dataoff);
nsio = to_nd_namespace_io(&ndns->dev);
if (offset >= resource_size(&nsio->res)) {
dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
return -EBUSY;
}
- if ((nd_pfn->align && !IS_ALIGNED(offset, nd_pfn->align))
+ if ((align && !IS_ALIGNED(offset, align))
|| !IS_ALIGNED(offset, PAGE_SIZE)) {
- dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
- offset);
+ dev_err(&nd_pfn->dev,
+ "bad offset: %#llx dax disabled align: %#lx\n",
+ offset, align);
return -ENXIO;
}
res->start += start_pad;
res->end -= end_trunc;
- nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
if (nd_pfn->mode == PFN_MODE_RAM) {
if (offset < SZ_8K)
return ERR_PTR(-EINVAL);
return nsa->ns_id - nsb->ns_id;
}
-static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
- struct nvme_ns *ns;
-
- lockdep_assert_held(&ctrl->namespaces_mutex);
+ struct nvme_ns *ns, *ret = NULL;
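+ /* Take a reference under namespaces_mutex so the namespace stays
+ * valid after the lock is dropped; callers must nvme_put_ns() it.
+ */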
+ mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->ns_id == nsid)
- return ns;
+ if (ns->ns_id == nsid) {
+ kref_get(&ns->kref);
+ ret = ns;
+ break;
+ }
if (ns->ns_id > nsid)
break;
}
- return NULL;
+ mutex_unlock(&ctrl->namespaces_mutex);
+ return ret;
}
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
struct gendisk *disk;
int node = dev_to_node(ctrl->dev);
- lockdep_assert_held(&ctrl->namespaces_mutex);
-
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
return;
if (nvme_revalidate_disk(ns->disk))
goto out_free_disk;
- list_add_tail_rcu(&ns->list, &ctrl->namespaces);
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_add_tail(&ns->list, &ctrl->namespaces);
+ mutex_unlock(&ctrl->namespaces_mutex);
+
kref_get(&ctrl->kref);
if (ns->type == NVME_NS_LIGHTNVM)
return;
static void nvme_ns_remove(struct nvme_ns *ns)
{
- lockdep_assert_held(&ns->ctrl->namespaces_mutex);
-
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
}
+
+ mutex_lock(&ns->ctrl->namespaces_mutex);
list_del_init(&ns->list);
- synchronize_rcu();
+ mutex_unlock(&ns->ctrl->namespaces_mutex);
+
nvme_put_ns(ns);
}
{
struct nvme_ns *ns;
- ns = nvme_find_ns(ctrl, nsid);
+ ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
if (revalidate_disk(ns->disk))
nvme_ns_remove(ns);
+ nvme_put_ns(ns);
} else
nvme_alloc_ns(ctrl, nsid);
}
nvme_validate_ns(ctrl, nsid);
while (++prev < nsid) {
- ns = nvme_find_ns(ctrl, prev);
- if (ns)
+ ns = nvme_find_get_ns(ctrl, prev);
+ if (ns) {
nvme_ns_remove(ns);
+ nvme_put_ns(ns);
+ }
}
}
nn -= j;
struct nvme_ns *ns, *next;
unsigned i;
- lockdep_assert_held(&ctrl->namespaces_mutex);
-
for (i = 1; i <= nn; i++)
nvme_validate_ns(ctrl, i);
if (nvme_identify_ctrl(ctrl, &id))
return;
- mutex_lock(&ctrl->namespaces_mutex);
nn = le32_to_cpu(id->nn);
if (ctrl->vs >= NVME_VS(1, 1) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
}
nvme_scan_ns_sequential(ctrl, nn);
done:
+ mutex_lock(&ctrl->namespaces_mutex);
list_sort(NULL, &ctrl->namespaces, ns_cmp);
mutex_unlock(&ctrl->namespaces_mutex);
kfree(id);
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);
+/*
+ * This function iterates the namespace list unlocked to allow recovery from
+ * controller failure. It is up to the caller to ensure the namespace list is
+ * not modified by scan work while this function is executing.
+ */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns, *next;
if (ctrl->state == NVME_CTRL_DEAD)
nvme_kill_queues(ctrl);
- mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
nvme_ns_remove(ns);
- mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
{
struct nvme_ns *ns;
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
- if (!kref_get_unless_zero(&ns->kref))
- continue;
-
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
/*
* Revalidating a dead namespace sets capacity to 0. This will
* end buffered writers dirtying pages that can't be synced.
blk_set_queue_dying(ns->queue);
blk_mq_abort_requeue_list(ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
-
- nvme_put_ns(ns);
}
- rcu_read_unlock();
+ mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
{
struct nvme_ns *ns;
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
spin_lock_irq(ns->queue->queue_lock);
queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
spin_unlock_irq(ns->queue->queue_lock);
blk_mq_cancel_requeue_work(ns->queue);
blk_mq_stop_hw_queues(ns->queue);
}
- rcu_read_unlock();
+ mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
{
struct nvme_ns *ns;
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
blk_mq_kick_requeue_list(ns->queue);
}
- rcu_read_unlock();
+ mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
static void nvme_dev_unmap(struct nvme_dev *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int bars;
+
if (dev->bar)
iounmap(dev->bar);
- pci_release_regions(to_pci_dev(dev->dev));
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ pci_release_selected_regions(pdev, bars);
}
static void nvme_pci_disable(struct nvme_dev *dev)
return 0;
release:
- pci_release_regions(pdev);
+ pci_release_selected_regions(pdev, bars);
return -ENODEV;
}
struct device_node **nodepp)
{
struct device_node *root;
- int offset = 0, depth = 0;
+ int offset = 0, depth = 0, initial_depth = 0;
#define FDT_MAX_DEPTH 64
unsigned int fpsizes[FDT_MAX_DEPTH];
struct device_node *nps[FDT_MAX_DEPTH];
if (nodepp)
*nodepp = NULL;
+ /*
+	 * We're unflattening a device sub-tree if @dad is valid. There may
+	 * be multiple nodes in the first level of depth. We need to set
+	 * @depth to 1 to make fdt_next_node() happy, as it bails out
+	 * immediately when a negative @depth is found. Otherwise, the
+	 * device nodes other than the first one won't be unflattened
+	 * successfully.
+ */
+ if (dad)
+ depth = initial_depth = 1;
+
root = dad;
fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0;
nps[depth] = dad;
+
for (offset = 0;
- offset >= 0 && depth >= 0;
+ offset >= 0 && depth >= initial_depth;
offset = fdt_next_node(blob, offset, &depth)) {
if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
continue;
EXPORT_SYMBOL_GPL(of_irq_to_resource);
/**
- * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
+ * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
* @dev: pointer to device tree node
- * @index: zero-based index of the irq
- *
- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
- * is not yet created.
+ * @index: zero-based index of the IRQ
*
+ * Returns the Linux IRQ number on success, 0 on IRQ mapping failure,
+ * -EPROBE_DEFER if the IRQ domain is not yet created, or an error code
+ * on any other failure.
*/
int of_irq_get(struct device_node *dev, int index)
{
EXPORT_SYMBOL_GPL(of_irq_get);
/**
- * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number
+ * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
* @dev: pointer to device tree node
- * @name: irq name
+ * @name: IRQ name
*
- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
- * is not yet created, or error code in case of any other failure.
+ * Returns the Linux IRQ number on success, 0 on IRQ mapping failure,
+ * -EPROBE_DEFER if the IRQ domain is not yet created, or an error code
+ * on any other failure.
*/
int of_irq_get_byname(struct device_node *dev, const char *name)
{
}
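A hypothetical caller sketch for the return convention documented above; the surrounding probe function and its of_node are placeholders, not part of the patch:

	int irq = of_irq_get(dev->of_node, 0);

	if (irq == -EPROBE_DEFER)
		return irq;	/* IRQ domain not created yet, retry probe later */
	if (irq < 0)
		return irq;	/* any other failure */
	if (irq == 0)
		return -ENXIO;	/* IRQ mapping failed */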
	/* Need to adjust the alignment to satisfy the CMA requirement */
- if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool"))
- align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+ if (IS_ENABLED(CONFIG_CMA)
+ && of_flat_dt_is_compatible(node, "shared-dma-pool")
+ && of_get_flat_dt_prop(node, "reusable", NULL)
+ && !of_get_flat_dt_prop(node, "no-map", NULL)) {
+ unsigned long order =
+ max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
+
+ align = max(align, (phys_addr_t)PAGE_SIZE << order);
+ }
prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
if (prop) {
else
pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
*(u16 *)buf);
- buf += 2;
+ buf += 4;
}
- len += 2;
+ len += 4;
/*
* If we have any Low Priority VCs and a VC Arbitration Table Offset
/* For SPIs, we need to track the affinity per IRQ */
if (using_spi) {
- if (i >= pdev->num_resources) {
- of_node_put(dn);
+ if (i >= pdev->num_resources)
break;
- }
irqs[i] = cpu;
}
/* Keep track of the CPUs containing this PMU type */
cpumask_set_cpu(cpu, &pmu->supported_cpus);
- of_node_put(dn);
i++;
} while (1);
armpmu_init(pmu);
- if (!__oprofile_cpu_pmu)
- __oprofile_cpu_pmu = pmu;
-
pmu->plat_device = pdev;
if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
if (!ret)
ret = init_fn(pmu);
} else {
- ret = probe_current_pmu(pmu, probe_table);
cpumask_setall(&pmu->supported_cpus);
+ ret = probe_current_pmu(pmu, probe_table);
}
if (ret) {
if (ret)
goto out_destroy;
+ if (!__oprofile_cpu_pmu)
+ __oprofile_cpu_pmu = pmu;
+
pr_info("enabled with %s PMU driver, %d counters available\n",
pmu->name, pmu->num_events);
out_free:
pr_info("%s: failed to register PMU devices!\n",
of_node_full_name(node));
+ kfree(pmu->irq_affinity);
kfree(pmu);
return ret;
}
}
usb2->phy = devm_phy_create(dev, NULL, &ops);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ if (IS_ERR(usb2->phy))
+ return PTR_ERR(usb2->phy);
phy_set_drvdata(usb2->phy, usb2);
platform_set_drvdata(pdev, usb2);
struct exynos_mipi_video_phy *state)
{
u32 val;
+ int ret;
+
+ ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
+ if (ret)
+ return 0;
- regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
return val & data->resetn_val;
}
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
int err;
- miphy_phy->miphy_rst = of_reset_control_get(node, "miphy-sw-rst");
+ miphy_phy->miphy_rst =
+ of_reset_control_get_shared(node, "miphy-sw-rst");
if (IS_ERR(miphy_phy->miphy_rst)) {
dev_err(miphy_dev->dev,
extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
}
-static bool rcar_gen3_check_vbus(struct rcar_gen3_chan *ch)
-{
- return !!(readl(ch->base + USB2_ADPCTRL) &
- USB2_ADPCTRL_OTGSESSVLD);
-}
-
static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
{
return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch)
{
- bool is_host = true;
-
- /* B-device? */
- if (rcar_gen3_check_id(ch) && rcar_gen3_check_vbus(ch))
- is_host = false;
-
- if (is_host)
+ if (!rcar_gen3_check_id(ch))
rcar_gen3_init_for_host(ch);
else
rcar_gen3_init_for_peri(ch);
return -ENODEV;
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
- if (IS_ERR(dp))
+ if (!dp)
return -ENOMEM;
dp->dev = dev;
phy_dev->dev = dev;
dev_set_drvdata(dev, phy_dev);
- phy_dev->rstc = devm_reset_control_get(dev, "global");
+ phy_dev->rstc = devm_reset_control_get_shared(dev, "global");
if (IS_ERR(phy_dev->rstc)) {
dev_err(dev, "failed to ctrl picoPHY reset\n");
return PTR_ERR(phy_dev->rstc);
}
- phy_dev->rstport = devm_reset_control_get(dev, "port");
+ phy_dev->rstport = devm_reset_control_get_exclusive(dev, "port");
if (IS_ERR(phy_dev->rstport)) {
dev_err(dev, "failed to ctrl picoPHY reset\n");
return PTR_ERR(phy_dev->rstport);
{
struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
u32 temp, usbc_bit = BIT(phy->index * 2);
- void *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
+ void __iomem *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
int i;
mutex_lock(&phy_data->mutex);
if (data->vbus_power_nb_registered)
power_supply_unreg_notifier(&data->vbus_power_nb);
- if (data->id_det_irq >= 0)
+ if (data->id_det_irq > 0)
devm_free_irq(dev, data->id_det_irq, data);
- if (data->vbus_det_irq >= 0)
+ if (data->vbus_det_irq > 0)
devm_free_irq(dev, data->vbus_det_irq, data);
cancel_delayed_work_sync(&data->detect);
data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
- if ((data->id_det_gpio && data->id_det_irq < 0) ||
- (data->vbus_det_gpio && data->vbus_det_irq < 0))
+ if ((data->id_det_gpio && data->id_det_irq <= 0) ||
+ (data->vbus_det_gpio && data->vbus_det_irq <= 0))
data->phy0_poll = true;
- if (data->id_det_irq >= 0) {
+ if (data->id_det_irq > 0) {
ret = devm_request_irq(dev, data->id_det_irq,
sun4i_usb_phy0_id_vbus_det_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
}
}
- if (data->vbus_det_irq >= 0) {
+ if (data->vbus_det_irq > 0) {
ret = devm_request_irq(dev, data->vbus_det_irq,
sun4i_usb_phy0_id_vbus_det_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
ret = ti_pipe3_dpll_wait_lock(phy);
}
- /* Program the DPLL only if not locked */
+ /* SATA has issues if re-programmed when locked */
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
- if (!(val & PLL_LOCK))
- if (ti_pipe3_dpll_program(phy))
- return -EINVAL;
+ if ((val & PLL_LOCK) && of_device_is_compatible(phy->dev->of_node,
+ "ti,phy-pipe3-sata"))
+ return ret;
+
+ /* Program the DPLL */
+ ret = ti_pipe3_dpll_program(phy);
+ if (ret) {
+ ti_pipe3_disable_clocks(phy);
+ return -EINVAL;
+ }
return ret;
}
twl4030_usb_set_mode(twl, twl->usb_mode);
if (twl->usb_mode == T2_USB_MODE_ULPI)
twl4030_i2c_access(twl, 0);
- schedule_delayed_work(&twl->id_workaround_work, 0);
+ twl->linkstat = MUSB_UNKNOWN;
+ schedule_delayed_work(&twl->id_workaround_work, HZ);
return 0;
}
struct twl4030_usb *twl = _twl;
enum musb_vbus_id_status status;
bool status_changed = false;
+ int err;
status = twl4030_usb_linkstat(twl);
pm_runtime_mark_last_busy(twl->dev);
pm_runtime_put_autosuspend(twl->dev);
}
- musb_mailbox(status);
+ err = musb_mailbox(status);
+ if (err)
+ twl->linkstat = MUSB_UNKNOWN;
}
/* don't schedule during sleep - irq works right then */
struct twl4030_usb *twl = phy_get_drvdata(phy);
pm_runtime_get_sync(twl->dev);
- schedule_delayed_work(&twl->id_workaround_work, 0);
+ twl->linkstat = MUSB_UNKNOWN;
+ schedule_delayed_work(&twl->id_workaround_work, HZ);
pm_runtime_mark_last_busy(twl->dev);
pm_runtime_put_autosuspend(twl->dev);
if (cable_present(twl->linkstat))
pm_runtime_put_noidle(twl->dev);
pm_runtime_mark_last_busy(twl->dev);
- pm_runtime_put_sync_suspend(twl->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(twl->dev);
pm_runtime_disable(twl->dev);
/* autogate 60MHz ULPI clock,
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_SIRF) += sirf/
-obj-$(CONFIG_PINCTRL_TEGRA) += tegra/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_PINCTRL_TZ1090) += pinctrl-tz1090.o
obj-$(CONFIG_PINCTRL_TZ1090_PDC) += pinctrl-tz1090-pdc.o
obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
pin_reg = &info->pin_regs[pin_id];
if (pin_reg->mux_reg == -1) {
- dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
+ dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n",
info->pins[pin_id].name);
- return -EINVAL;
+ continue;
}
if (info->flags & SHARE_MUX_CONF_REG) {
static const unsigned int byt_score_plt_clk0_pins[] = { 96 };
static const unsigned int byt_score_plt_clk1_pins[] = { 97 };
static const unsigned int byt_score_plt_clk2_pins[] = { 98 };
-static const unsigned int byt_score_plt_clk4_pins[] = { 99 };
-static const unsigned int byt_score_plt_clk5_pins[] = { 100 };
-static const unsigned int byt_score_plt_clk3_pins[] = { 101 };
+static const unsigned int byt_score_plt_clk3_pins[] = { 99 };
+static const unsigned int byt_score_plt_clk4_pins[] = { 100 };
+static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
static const struct byt_simple_func_mux byt_score_plt_clk_mux[] = {
SIMPLE_FUNC("plt_clk", 1),
};
const struct mtk_desc_pin *pin;
chained_irq_enter(chip, desc);
- for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) {
+ for (eint_num = 0;
+ eint_num < pctl->devdata->ap_num;
+ eint_num += 32, reg += 4) {
status = readl(reg);
- reg += 4;
while (status) {
offset = __ffs(status);
index = eint_num + offset;
clk_enable(nmk_chip->clk);
- dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+ dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
clk_disable(nmk_chip->clk);
else
mask &= ~soc_mask;
pcs->write(mask, pcswi->reg);
+
+ /* flush posted write */
+ mask = pcs->read(pcswi->reg);
raw_spin_unlock(&pcs->lock);
}
-obj-y += pinctrl-tegra.o
+obj-$(CONFIG_PINCTRL_TEGRA) += pinctrl-tegra.o
obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o
obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
goto exit;
}
+ if (u_cmd.outsize != s_cmd->outsize ||
+ u_cmd.insize != s_cmd->insize) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
s_cmd->command += ec->cmd_offset;
ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
- if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
+ if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
ret = -EFAULT;
exit:
kfree(s_cmd);
config DELL_LAPTOP
tristate "Dell Laptop Extras"
- depends on X86
depends on DELL_SMBIOS
depends on DMI
depends on BACKLIGHT_CLASS_DEVICE
config SENSORS_HDAPS
tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
- depends on INPUT && X86
+ depends on INPUT
select INPUT_POLLDEV
default n
help
config ACPI_CMPC
tristate "CMPC Laptop Extras"
- depends on X86 && ACPI
+ depends on ACPI
depends on RFKILL || RFKILL=n
select INPUT
select BACKLIGHT_CLASS_DEVICE
config INTEL_PMC_CORE
bool "Intel PMC Core driver"
- depends on X86 && PCI
+ depends on PCI
---help---
The Intel Platform Controller Hub for Intel Core SoCs provides access
to Power Management Controller registers via a PCI interface. This
config IBM_RTL
tristate "Device driver to enable PRTL support"
- depends on X86 && PCI
+ depends on PCI
---help---
Enable support for IBM Premium Real Time Mode (PRTM).
	  This module will allow you to enter and exit PRTM in the BIOS via
config SAMSUNG_LAPTOP
tristate "Samsung Laptop driver"
- depends on X86
depends on RFKILL || RFKILL = n
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on BACKLIGHT_CLASS_DEVICE
static const struct key_entry ideapad_keymap[] = {
{ KE_KEY, 6, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 7, { KEY_CAMERA } },
+ { KE_KEY, 8, { KEY_MICMUTE } },
{ KE_KEY, 11, { KEY_F16 } },
{ KE_KEY, 13, { KEY_WLAN } },
{ KE_KEY, 16, { KEY_PROG1 } },
break;
case 13:
case 11:
+ case 8:
case 7:
case 6:
ideapad_input_report(priv, vpc_bit);
static u32 hotkey_orig_mask; /* events the BIOS had enabled */
static u32 hotkey_all_mask; /* all events supported in fw */
+static u32 hotkey_adaptive_all_mask; /* all adaptive events supported in fw */
static u32 hotkey_reserved_mask; /* events better left disabled */
static u32 hotkey_driver_mask; /* events needed by the driver */
static u32 hotkey_user_mask; /* events visible to userspace */
static DEVICE_ATTR_RO(hotkey_all_mask);
+/* sysfs hotkey adaptive_all_mask -------------------------------------- */
+static ssize_t hotkey_adaptive_all_mask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+ hotkey_adaptive_all_mask | hotkey_source_mask);
+}
+
+static DEVICE_ATTR_RO(hotkey_adaptive_all_mask);
+
/* sysfs hotkey recommended_mask --------------------------------------- */
static ssize_t hotkey_recommended_mask_show(struct device *dev,
struct device_attribute *attr,
&dev_attr_wakeup_hotunplug_complete.attr,
&dev_attr_hotkey_mask.attr,
&dev_attr_hotkey_all_mask.attr,
+ &dev_attr_hotkey_adaptive_all_mask.attr,
&dev_attr_hotkey_recommended_mask.attr,
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
&dev_attr_hotkey_source_mask.attr,
if (!tp_features.hotkey)
return 1;
- /*
- * Check if we have an adaptive keyboard, like on the
- * Lenovo Carbon X1 2014 (2nd Gen).
- */
- if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
- if ((hkeyv >> 8) == 2) {
- tp_features.has_adaptive_kbd = true;
- res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
- &adaptive_kbd_attr_group);
- if (res)
- goto err_exit;
- }
- }
-
quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
ARRAY_SIZE(tpacpi_hotkey_qtable));
A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking
for HKEY interface version 0x100 */
if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
- if ((hkeyv >> 8) != 1) {
- pr_err("unknown version of the HKEY interface: 0x%x\n",
- hkeyv);
- pr_err("please report this to %s\n", TPACPI_MAIL);
- } else {
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "firmware HKEY interface version: 0x%x\n",
+ hkeyv);
+
+ switch (hkeyv >> 8) {
+ case 1:
/*
* MHKV 0x100 in A31, R40, R40e,
* T4x, X31, and later
*/
- vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
- "firmware HKEY interface version: 0x%x\n",
- hkeyv);
/* Paranoia check AND init hotkey_all_mask */
if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
"MHKA", "qd")) {
- pr_err("missing MHKA handler, "
- "please report this to %s\n",
+ pr_err("missing MHKA handler, please report this to %s\n",
TPACPI_MAIL);
/* Fallback: pre-init for FN+F3,F4,F12 */
hotkey_all_mask = 0x080cU;
} else {
tp_features.hotkey_mask = 1;
}
+ break;
+
+ case 2:
+ /*
+ * MHKV 0x200 in X1, T460s, X260, T560, X1 Tablet (2016)
+ */
+
+ /* Paranoia check AND init hotkey_all_mask */
+ if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
+ "MHKA", "dd", 1)) {
+ pr_err("missing MHKA handler, please report this to %s\n",
+ TPACPI_MAIL);
+ /* Fallback: pre-init for FN+F3,F4,F12 */
+ hotkey_all_mask = 0x080cU;
+ } else {
+ tp_features.hotkey_mask = 1;
+ }
+
+ /*
+ * Check if we have an adaptive keyboard, like on the
+ * Lenovo Carbon X1 2014 (2nd Gen).
+ */
+ if (acpi_evalf(hkey_handle, &hotkey_adaptive_all_mask,
+ "MHKA", "dd", 2)) {
+ if (hotkey_adaptive_all_mask != 0) {
+ tp_features.has_adaptive_kbd = true;
+ res = sysfs_create_group(
+ &tpacpi_pdev->dev.kobj,
+ &adaptive_kbd_attr_group);
+ if (res)
+ goto err_exit;
+ }
+ } else {
+ tp_features.has_adaptive_kbd = false;
+ hotkey_adaptive_all_mask = 0x0U;
+ }
+ break;
+
+ default:
+ pr_err("unknown version of the HKEY interface: 0x%x\n",
+ hkeyv);
+ pr_err("please report this to %s\n", TPACPI_MAIL);
+ break;
}
}
WARN_ON(tzd == NULL);
psy = tzd->devdata;
- ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+ ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+ if (ret)
+ return ret;
/* Convert tenths of degree Celsius to milli degree Celsius. */
- if (!ret)
- *temp = val.intval * 100;
+ *temp = val.intval * 100;
return ret;
}
int ret;
psy = tcd->devdata;
- ret = psy->desc->get_property(psy,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
- if (!ret)
- *state = val.intval;
+ ret = power_supply_get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+ if (ret)
+ return ret;
+
+ *state = val.intval;
return ret;
}
int ret;
psy = tcd->devdata;
- ret = psy->desc->get_property(psy,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
- if (!ret)
- *state = val.intval;
+ ret = power_supply_get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+ if (ret)
+ return ret;
+
+ *state = val.intval;
return ret;
}
{
struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
struct tps65217_charger *charger;
+ struct power_supply_config cfg = {};
int ret;
dev_dbg(&pdev->dev, "%s\n", __func__);
charger->tps = tps;
charger->dev = &pdev->dev;
+ cfg.of_node = pdev->dev.of_node;
+ cfg.drv_data = charger;
+
charger->ac = devm_power_supply_register(&pdev->dev,
&tps65217_charger_desc,
- NULL);
+ &cfg);
if (IS_ERR(charger->ac)) {
dev_err(&pdev->dev, "failed: power supply register\n");
return PTR_ERR(charger->ac);
struct pps_client_pp *device;
/* FIXME: oooh, this is ugly! */
- if (strcmp(pardev->name, KBUILD_MODNAME))
+ if (!pardev || strcmp(pardev->name, KBUILD_MODNAME))
/* not our port */
return;
break;
case PTP_SYS_OFFSET:
- sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL);
- if (!sysoff) {
- err = -ENOMEM;
- break;
- }
- if (copy_from_user(sysoff, (void __user *)arg,
- sizeof(*sysoff))) {
- err = -EFAULT;
+ sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
+ if (IS_ERR(sysoff)) {
+ err = PTR_ERR(sysoff);
+ sysoff = NULL;
break;
}
if (sysoff->n_samples > PTP_MAX_SAMPLES) {
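memdup_user() folds the open-coded kmalloc()/copy_from_user()/unwind sequence removed above into one call. Roughly what it does internally (a sketch, not the kernel's exact implementation):

static void *memdup_user_sketch(const void __user *src, size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	return p;	/* caller checks IS_ERR() and frees with kfree() */
}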
{
int err;
- if (!pwm)
+ if (!pwm || !state || !state->period ||
+ state->duty_cycle > state->period)
return -EINVAL;
if (!memcmp(state, &pwm->state, sizeof(*state)))
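A hypothetical caller sketch showing what the new argument check accepts; the period and duty_cycle values are made up for illustration:

	struct pwm_state state;
	int err;

	pwm_get_state(pwm, &state);
	state.period = 1000000;		/* ns; must be non-zero */
	state.duty_cycle = 250000;	/* must not exceed period */
	err = pwm_apply_state(pwm, &state);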
chip->chip.of_pwm_n_cells = 3;
chip->chip.can_sleep = 1;
- ret = pwmchip_add(&chip->chip);
+ ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED);
if (ret) {
clk_disable_unprepare(hlcdc->periph_clk);
return ret;
goto unlock;
}
- pwm_apply_state(pwm, &state);
+ ret = pwm_apply_state(pwm, &state);
unlock:
mutex_unlock(&export->lock);
if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
sreg->sel = 22;
- if (!sreg->sel) {
+ if (!sreg->bypass && !sreg->sel) {
dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
return -EINVAL;
}
unsigned int val;
int ret;
+ if (!rinfo)
+ return 0;
+
switch (fps_src) {
case MAX77620_FPS_SRC_0:
case MAX77620_FPS_SRC_1:
int pd = rpdata->active_fps_pd_slot;
int ret = 0;
+ if (!rinfo)
+ return 0;
+
if (is_suspend) {
pu = rpdata->suspend_fps_pu_slot;
pd = rpdata->suspend_fps_pd_slot;
RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 1550000, 12500, 0x22, SD1),
RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE),
RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE),
- RAIL_SD(SD4, sd4, "in-sd4", SDX, 600000, 3787500, 12500, 0xFF, NONE),
RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000),
RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000),
.enable = rpm_reg_enable,
.disable = rpm_reg_disable,
.is_enabled = rpm_reg_is_enabled,
+ .list_voltage = regulator_list_voltage_linear_range,
+
+ .get_voltage = rpm_reg_get_voltage,
+ .set_voltage = rpm_reg_set_voltage,
+
+ .set_load = rpm_reg_set_load,
+};
+
+static const struct regulator_ops rpm_smps_ldo_ops_fixed = {
+ .enable = rpm_reg_enable,
+ .disable = rpm_reg_disable,
+ .is_enabled = rpm_reg_is_enabled,
.get_voltage = rpm_reg_get_voltage,
.set_voltage = rpm_reg_set_voltage,
static const struct regulator_desc pm8941_lnldo = {
.fixed_uV = 1740000,
.n_voltages = 1,
- .ops = &rpm_smps_ldo_ops,
+ .ops = &rpm_smps_ldo_ops_fixed,
};
static const struct regulator_desc pm8941_switch = {
int ramp_delay)
{
struct tps51632_chip *tps = rdev_get_drvdata(rdev);
- int bit = ramp_delay/6000;
+ int bit;
int ret;
- if (bit)
- bit--;
+ if (ramp_delay == 0)
+ bit = 0;
+ else
+ bit = DIV_ROUND_UP(ramp_delay, 6000) - 1;
+
ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit));
if (ret < 0)
dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret);
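A worked example of the new rounding, using the 6000 divisor from the code above:

	/*
	 * ramp_delay = 6000  -> DIV_ROUND_UP(6000, 6000) - 1 = 0
	 * ramp_delay = 7000  -> DIV_ROUND_UP(7000, 6000) - 1 = 1
	 * ramp_delay = 12000 -> DIV_ROUND_UP(12000, 6000) - 1 = 1
	 *
	 * The old "bit = ramp_delay / 6000; if (bit) bit--" mapped 7000 to
	 * bit 0, rounding the requested delay down instead of up.
	 */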
qeth_l2_set_offline(cgdev);
if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
qeth_l3_set_offline(cgdev);
if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
} else {
struct scsi_cmnd *SCp;
- SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG);
+ SCp = SDp->current_cmnd;
if(unlikely(SCp == NULL)) {
sdev_printk(KERN_ERR, SDp,
"no saved request for untagged cmd\n");
slot->tag, slot);
} else {
slot->tag = SCSI_NO_TAG;
- /* must populate current_cmnd for scsi_host_find_tag to work */
+ /* save current command for reselection */
SCp->device->current_cmnd = SCp;
}
/* sanity check: some of the commands generated by the mid-layer
*/
#define AAC_QUIRK_SCSI_32 0x0020
+/*
+ * SRC based adapters support the AifReqEvent functions
+ */
+#define AAC_QUIRK_SRC 0x0040
+
/*
* The adapter interface specs all queues to be located in the same
* physically contiguous block. The host structure that defines the
{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
- { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */
+ { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
};
/**
else
shost->this_id = shost->max_id;
- aac_intr_normal(aac, 0, 2, 0, NULL);
+ if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+ aac_intr_normal(aac, 0, 2, 0, NULL);
/*
* dmb - we may need to move the setting of these parms somewhere else once
ioa_cfg->intr_flag = IPR_USE_MSI;
else {
ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->clear_isr = 1;
ioa_cfg->nvectors = 1;
dev_info(&pdev->dev, "Cannot enable MSI.\n");
}
ActiveCableEventData =
(Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
if (ActiveCableEventData->ReasonCode ==
- MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER)
+ MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) {
pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d",
ioc->name, ActiveCableEventData->ReceptacleID);
pr_info("cannot be powered and devices connected to this active cable");
pr_info("will not be seen. This active cable");
pr_info("requires %d mW of power",
ActiveCableEventData->ActiveCablePowerRequirement);
+ }
break;
default: /* ignore the rest */
if (!vha->flags.online)
return;
- if (rsp->msix->cpuid != smp_processor_id()) {
+ if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
/* if kernel does not notify qla of IRQ's CPU change,
* then set it here.
*/
{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
{"Promise", "", NULL, BLIST_SPARSELUN},
+ {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES},
{"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
{"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
* here, and we don't know what device it is
* trying to work with, leave it as-is.
*/
- vmax = 8; /* max length of vendor */
+ vmax = sizeof(devinfo->vendor);
vskip = vendor;
while (vmax > 0 && *vskip == ' ') {
vmax--;
while (vmax > 0 && vskip[vmax - 1] == ' ')
--vmax;
- mmax = 16; /* max length of model */
+ mmax = sizeof(devinfo->model);
mskip = model;
while (mmax > 0 && *mskip == ' ') {
mmax--;
* Behave like the older version of get_device_flags.
*/
if (memcmp(devinfo->vendor, vskip, vmax) ||
- devinfo->vendor[vmax])
+ (vmax < sizeof(devinfo->vendor) &&
+ devinfo->vendor[vmax]))
continue;
if (memcmp(devinfo->model, mskip, mmax) ||
- devinfo->model[mmax])
+ (mmax < sizeof(devinfo->model) &&
+ devinfo->model[mmax]))
continue;
return devinfo;
} else {
*/
void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
{
- scmd->device->host->host_failed--;
scmd->eh_eflags = 0;
list_move_tail(&scmd->eh_entry, done_q);
}
else
scsi_unjam_host(shost);
+ /* All scmds have been handled */
+ shost->host_failed = 0;
+
/*
* Note - if the above fails completely, the action is to take
* individual devices offline and flush the queue of any
}
/*
- * If we finished all bytes in the request we are done now.
+ * special case: failed zero length commands always need to
+ * drop down into the retry code. Otherwise, if we finished
+ * all bytes in the request we are done now.
*/
- if (!scsi_end_request(req, error, good_bytes, 0))
+ if (!(blk_rq_bytes(req) == 0 && error) &&
+ !scsi_end_request(req, error, good_bytes, 0))
return;
/*
**/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
- struct scsi_disk *sdkp = scsi_disk(disk);
- struct scsi_device *sdp = sdkp->device;
+ struct scsi_disk *sdkp = scsi_disk_get(disk);
+ struct scsi_device *sdp;
struct scsi_sense_hdr *sshdr = NULL;
int retval;
+ if (!sdkp)
+ return 0;
+
+ sdp = sdkp->device;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
/*
kfree(sshdr);
retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
sdp->changed = 0;
+ scsi_disk_put(sdkp);
return retval;
}
if (sdkp->opt_xfer_blocks &&
sdkp->opt_xfer_blocks <= dev_max &&
sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
- sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
- rw_max = q->limits.io_opt =
- sdkp->opt_xfer_blocks * sdp->sector_size;
- else
+ logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
+ q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+ rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+ } else
rw_max = BLK_DEF_MAX_SECTORS;
/* Combine with controller limits */
return blocks << (ilog2(sdev->sector_size) - 9);
}
+static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
+{
+ return blocks * sdev->sector_size;
+}
+
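A worked example of the two helpers, assuming 4096-byte logical blocks:

	/*
	 * logical_to_sectors(sdev, n) = n << (ilog2(4096) - 9) = n << 3
	 * logical_to_bytes(sdev, n)   = n * 4096
	 * i.e. one logical block is eight 512-byte sectors, or 4096 bytes.
	 */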
/*
* A DIF-capable target device can be formatted with different
* protection schemes. Currently 0 through 3 are defined:
struct spi_device *spi,
struct spi_transfer *xfer)
{
- int ret = 1;
+ int ret = 0;
struct rockchip_spi *rs = spi_master_get_devdata(master);
WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
spi_enable_chip(rs, 1);
ret = rockchip_spi_prepare_dma(rs);
}
+ /* successful DMA prepare means the transfer is in progress */
+ ret = ret ? ret : 1;
} else {
spi_enable_chip(rs, 1);
ret = rockchip_spi_pio_transfer(rs);
{
struct sun4i_spi *sspi = spi_master_get_devdata(master);
unsigned int mclk_rate, div, timeout;
+ unsigned int start, end, tx_time;
unsigned int tx_len = 0;
int ret = 0;
u32 reg;
/* We don't support transfer larger than the FIFO */
if (tfr->len > SUN4I_FIFO_DEPTH)
- return -EINVAL;
+ return -EMSGSIZE;
+
+ if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
+ return -EMSGSIZE;
reinit_completion(&sspi->done);
sspi->tx_buf = tfr->tx_buf;
sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
- /* Fill the TX FIFO */
- sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+ /*
+	 * Fill the TX FIFO.
+	 * Filling the FIFO fully causes a timeout for some reason,
+	 * at least on spi2 on the A10s.
+ */
+ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
/* Enable the interrupts */
sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
+ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+ start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(tx_time));
+ end = jiffies;
if (!timeout) {
+ dev_warn(&master->dev,
+ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+ jiffies_to_msecs(end - start), tx_time);
ret = -ETIMEDOUT;
goto out;
}
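A worked example of the tx_time heuristic above (pure arithmetic, values chosen for illustration):

	/*
	 * 64 bytes at 100 kHz: 64 * 8 * 2 / (100000 / 1000) = 10 ms,
	 * clamped up to the 100 ms floor by max().
	 * 64 bytes at 1 kHz:   64 * 8 * 2 / (1000 / 1000)   = 1024 ms,
	 * so roughly twice the nominal on-wire time replaces the old
	 * fixed 1000 ms timeout.
	 */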
{
struct sun6i_spi *sspi = spi_master_get_devdata(master);
unsigned int mclk_rate, div, timeout;
+ unsigned int start, end, tx_time;
unsigned int tx_len = 0;
int ret = 0;
u32 reg;
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+ start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(tx_time));
+ end = jiffies;
if (!timeout) {
+ dev_warn(&master->dev,
+ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+ jiffies_to_msecs(end - start), tx_time);
ret = -ETIMEDOUT;
goto out;
}
static int ti_qspi_remove(struct platform_device *pdev)
{
+ struct ti_qspi *qspi = platform_get_drvdata(pdev);
+ int rc;
+
+ rc = spi_master_suspend(qspi->master);
+ if (rc)
+ return rc;
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
goto error_ret_mut;
ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
mutex_unlock(&st->lock);
- if (ret)
+ if (ret < 0)
goto error_ret;
val = ret;
if (base_freq > 0)
{
struct spi_device *spi = to_spi_device(dev);
int i, ret;
- unsigned short *data;
+ unsigned short *data = buf;
__be16 *bdata = buf;
ret = spi_read(spi, buf, count * 2);
st->settling_cycles = val;
/* 2x, 4x handling, see datasheet */
- if (val > 511)
- val = (val >> 1) | (1 << 9);
- else if (val > 1022)
+ if (val > 1022)
val = (val >> 2) | (3 << 9);
+ else if (val > 511)
+ val = (val >> 1) | (1 << 9);
dat = cpu_to_be16(val);
ret = ad5933_i2c_write(st->client,
return 0;
failed:
- if (ni)
+ if (ni) {
lnet_ni_decref(ni);
+ rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
+ rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
+ }
rej.ibr_version = version;
- rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
- rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
kiblnd_reject(cmid, &rej);
return -ECONNREFUSED;
if (!efuseTbl)
return;
- eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(*eFuseWord));
+ eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
if (!eFuseWord) {
DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
goto eFuseWord_failed;
{
struct hal_ops *halfunc = &adapt->HalFunc;
- adapt->HalData = kzalloc(sizeof(*adapt->HalData), GFP_KERNEL);
+
+ adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
if (!adapt->HalData)
		DBG_88E("can't alloc memory for HAL DATA\n");
goto free_power_table;
}
- snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
- cpufreq_dev->id);
-
- cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
- &cpufreq_cooling_ops);
- if (IS_ERR(cool_dev))
- goto remove_idr;
-
/* Fill freq-table in descending order of frequencies */
for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
freq = find_next_max(table, freq);
pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
+ snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
+ cpufreq_dev->id);
+
+ cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
+ &cpufreq_cooling_ops);
+ if (IS_ERR(cool_dev))
+ goto remove_idr;
+
cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
return -ENODEV;
d->raw_bd = bd;
- ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br);
+ ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL);
if (ret)
return ret;
All modern Linux systems use the Unix98 ptys. Say Y unless
you're on an embedded system and want to conserve memory.
-config DEVPTS_MULTIPLE_INSTANCES
- bool "Support multiple instances of devpts"
- depends on UNIX98_PTYS
- default n
- ---help---
- Enable support for multiple instances of devpts filesystem.
- If you want to have isolated PTY namespaces (eg: in containers),
- say Y here. Otherwise, say N. If enabled, each mount of devpts
- filesystem with the '-o newinstance' option will create an
- independent PTY namespace.
-
config LEGACY_PTYS
bool "Legacy (BSD) PTY support"
default y
fsi = tty->driver_data;
else
fsi = tty->link->driver_data;
- devpts_kill_index(fsi, tty->index);
- devpts_put_ref(fsi);
+
+ if (fsi) {
+ devpts_kill_index(fsi, tty->index);
+ devpts_release(fsi);
+ }
}
static const struct tty_operations ptm_unix98_ops = {
if (retval)
return retval;
- fsi = devpts_get_ref(inode, filp);
- retval = -ENODEV;
- if (!fsi)
+ fsi = devpts_acquire(filp);
+ if (IS_ERR(fsi)) {
+ retval = PTR_ERR(fsi);
goto out_free_file;
+ }
/* find a device that is not in use. */
mutex_lock(&devpts_mutex);
retval = index;
if (index < 0)
- goto out_put_ref;
+ goto out_put_fsi;
mutex_lock(&tty_mutex);
return retval;
out:
devpts_kill_index(fsi, index);
-out_put_ref:
- devpts_put_ref(fsi);
+out_put_fsi:
+ devpts_release(fsi);
out_free_file:
tty_free_file(filp);
return retval;
static void do_compute_shiftstate(void)
{
- unsigned int i, j, k, sym, val;
+ unsigned int k, sym, val;
shift_state = 0;
memset(shift_down, 0, sizeof(shift_down));
- for (i = 0; i < ARRAY_SIZE(key_down); i++) {
-
- if (!key_down[i])
+ for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
+ sym = U(key_maps[0][k]);
+ if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
continue;
- k = i * BITS_PER_LONG;
-
- for (j = 0; j < BITS_PER_LONG; j++, k++) {
-
- if (!test_bit(k, key_down))
- continue;
+ val = KVAL(sym);
+ if (val == KVAL(K_CAPSSHIFT))
+ val = KVAL(K_SHIFT);
- sym = U(key_maps[0][k]);
- if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
- continue;
-
- val = KVAL(sym);
- if (val == KVAL(K_CAPSSHIFT))
- val = KVAL(K_SHIFT);
-
- shift_down[val]++;
- shift_state |= (1 << val);
- }
+ shift_down[val]++;
+ shift_state |= BIT(val);
}
}
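A minimal standalone illustration of the for_each_set_bit() idiom used above (hypothetical demo function, not part of the patch):

static void demo_set_bits(void)
{
	DECLARE_BITMAP(down, 16);
	unsigned int k;

	bitmap_zero(down, 16);
	set_bit(0, down);
	set_bit(2, down);

	for_each_set_bit(k, down, 16)
		pr_info("bit %u set\n", k);	/* prints 0, then 2 */
}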
vc->vc_complement_mask = 0;
vc->vc_can_do_color = 0;
vc->vc_panic_force_write = false;
+ vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
vc->vc_sw->con_init(vc, init);
if (!vc->vc_complement_mask)
vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
return fsm->state_changed;
}
EXPORT_SYMBOL_GPL(otg_statemachine);
+MODULE_LICENSE("GPL");
* Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
* deallocated.
*
- * Make sure to only deallocate the bandwidth_mutex when the primary HCD is
- * freed. When hcd_release() is called for either hcd in a peer set
- * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to
- * block new peering attempts
+ * Make sure to deallocate the bandwidth_mutex only when the last HCD is
+ * freed. When hcd_release() is called for either hcd in a peer set,
+ * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
*/
static void hcd_release(struct kref *kref)
{
struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
mutex_lock(&usb_port_peer_mutex);
- if (usb_hcd_is_primary_hcd(hcd)) {
- kfree(hcd->address0_mutex);
- kfree(hcd->bandwidth_mutex);
- }
if (hcd->shared_hcd) {
struct usb_hcd *peer = hcd->shared_hcd;
peer->shared_hcd = NULL;
- if (peer->primary_hcd == hcd)
- peer->primary_hcd = NULL;
+ peer->primary_hcd = NULL;
+ } else {
+ kfree(hcd->address0_mutex);
+ kfree(hcd->bandwidth_mutex);
}
mutex_unlock(&usb_port_peer_mutex);
kfree(hcd);
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* USB3503 */
+ { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Microsoft Wireless Laser Mouse 6000 Receiver */
{ USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
/* MAYA44USB sound device */
{ USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* ASUS Base Station(T100) */
+ { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
+ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
{ USB_DEVICE(0x1908, 0x1315), .driver_info =
USB_QUIRK_HONOR_BNUMINTERFACES },
- /* INTEL VALUE SSD */
- { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
-
- /* USB3503 */
- { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
-
- /* ASUS Base Station(T100) */
- { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
- USB_QUIRK_IGNORE_REMOTE_WAKEUP },
-
/* Protocol and OTG Electrical Test Device */
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Acer C120 LED Projector */
+ { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
+
/* Blackmagic Design Intensity Shuttle */
{ USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
/* Blackmagic Design UltraStudio SDI */
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+ /* INTEL VALUE SSD */
+ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+
{ } /* terminating entry must be last */
};
DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \
dev_name(hsotg->dev), ##__VA_ARGS__)
+#ifdef CONFIG_MIPS
+/*
+ * There are some MIPS machines that can run in either big-endian
+ * or little-endian mode and that access the dwc2 registers without
+ * a byteswap in either mode.
+ * Unlike other architectures, MIPS apparently does not require a
+ * barrier before the __raw_writel() to synchronize with DMA but does
+ * require the barrier after the __raw_writel() to serialize a set of
+ * writes. This set of operations was added specifically for MIPS and
+ * should only be used there.
+ */
static inline u32 dwc2_readl(const void __iomem *addr)
{
u32 value = __raw_readl(addr);
pr_info("INFO:: wrote %08x to %p\n", value, addr);
#endif
}
+#else
+/* Normal architectures just use readl/writel */
+static inline u32 dwc2_readl(const void __iomem *addr)
+{
+ return readl(addr);
+}
+
+static inline void dwc2_writel(u32 value, void __iomem *addr)
+{
+ writel(value, addr);
+
+#ifdef DWC2_LOG_WRITES
+ pr_info("info:: wrote %08x to %p\n", value, addr);
+#endif
+}
+#endif
/* Maximum number of Endpoints/HostChannels */
#define MAX_EPS_CHANNELS 16
return 1;
}
-static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value);
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
/**
* get_ep_head - return the first request on the endpoint
case USB_ENDPOINT_HALT:
halted = ep->halted;
- dwc2_hsotg_ep_sethalt(&ep->ep, set);
+ dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
if (ret) {
* dwc2_hsotg_ep_sethalt - set halt on a given endpoint
* @ep: The endpoint to set halt.
* @value: Set or unset the halt.
+ * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
+ * the endpoint is busy processing requests.
+ *
+ * We need to stall the endpoint immediately if the request comes from the
+ * set_feature protocol command handler.
*/
-static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value)
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hs = hs_ep->parent;
return 0;
}
+ if (hs_ep->isochronous) {
+ dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
+ return -EINVAL;
+ }
+
+ if (!now && value && !list_empty(&hs_ep->queue)) {
+ dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
+ ep->name);
+ return -EAGAIN;
+ }
+
if (hs_ep->dir_in) {
epreg = DIEPCTL(index);
epctl = dwc2_readl(hs->regs + epreg);
int ret = 0;
spin_lock_irqsave(&hs->lock, flags);
- ret = dwc2_hsotg_ep_sethalt(ep, value);
+ ret = dwc2_hsotg_ep_sethalt(ep, value, false);
spin_unlock_irqrestore(&hs->lock, flags);
return ret;
#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F)
#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
+#define DWC3_DEPCMD_CLEARPENDIN (1 << 11)
#define DWC3_DEPCMD_CMDACT (1 << 10)
#define DWC3_DEPCMD_CMDIOC (1 << 8)
platform_set_drvdata(pdev, exynos);
- ret = dwc3_exynos_register_phys(exynos);
- if (ret) {
- dev_err(dev, "couldn't register PHYs\n");
- return ret;
- }
-
exynos->dev = dev;
exynos->clk = devm_clk_get(dev, "usbdrd30");
goto err3;
}
+ ret = dwc3_exynos_register_phys(exynos);
+ if (ret) {
+ dev_err(dev, "couldn't register PHYs\n");
+ goto err4;
+ }
+
if (node) {
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to add dwc3 core\n");
- goto err4;
+ goto err5;
}
} else {
dev_err(dev, "no device node, failed to add dwc3 core\n");
ret = -ENODEV;
- goto err4;
+ goto err5;
}
return 0;
+err5:
+ platform_device_unregister(exynos->usb2_phy);
+ platform_device_unregister(exynos->usb3_phy);
err4:
regulator_disable(exynos->vdd10);
err3:
switch (dwc3_data->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
- val &= ~(USB3_FORCE_VBUSVALID | USB3_DELAY_VBUSVALID
+ val &= ~(USB3_DELAY_VBUSVALID
| USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3)
| USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2
| USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
- val |= USB3_DEVICE_NOT_HOST;
+ /*
+	 * When USB3_PORT2_FORCE_VBUSVALID is '1' and
+	 * USB3_PORT2_DEVICE_NOT_HOST is '1', the VBUSVLDEXT2 input
+	 * of the pico PHY is forced to 1.
+ */
+
+ val |= USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID;
break;
case USB_DR_MODE_HOST:
dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
- dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown");
+ dwc3_data->rstc_pwrdn =
+ devm_reset_control_get_exclusive(dev, "powerdown");
if (IS_ERR(dwc3_data->rstc_pwrdn)) {
dev_err(&pdev->dev, "could not get power controller\n");
ret = PTR_ERR(dwc3_data->rstc_pwrdn);
/* Manage PowerDown */
reset_control_deassert(dwc3_data->rstc_pwrdn);
- dwc3_data->rstc_rst = devm_reset_control_get(dev, "softreset");
+ dwc3_data->rstc_rst =
+ devm_reset_control_get_shared(dev, "softreset");
if (IS_ERR(dwc3_data->rstc_rst)) {
dev_err(&pdev->dev, "could not get reset controller\n");
ret = PTR_ERR(dwc3_data->rstc_rst);
return ret;
}
+static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd = DWC3_DEPCMD_CLEARSTALL;
+
+ /*
+ * As of core revision 2.60a the recommended programming model
+ * is to set the ClearPendIN bit when issuing a Clear Stall EP
+ * command for IN endpoints. This is to prevent an issue where
+ * some (non-compliant) hosts may not send ACK TPs for pending
+ * IN transfers due to a mishandled error condition. Synopsys
+ * STAR 9000614252.
+ */
+ if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
+ cmd |= DWC3_DEPCMD_CLEARPENDIN;
+
+	memset(&params, 0, sizeof(params));
+
+	return dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+}
+
static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
struct dwc3_trb *trb)
{
else
dep->flags |= DWC3_EP_STALL;
} else {
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
-				DWC3_DEPCMD_CLEARSTALL, &params);
+ ret = dwc3_send_clear_stall_ep_cmd(dep);
if (ret)
dev_err(dwc->dev, "failed to clear STALL on %s\n",
dep->name);
for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
struct dwc3_ep *dep;
- struct dwc3_gadget_ep_cmd_params params;
int ret;
dep = dwc->eps[epnum];
dep->flags &= ~DWC3_EP_STALL;
-		memset(&params, 0, sizeof(params));
-		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
-				DWC3_DEPCMD_CLEARSTALL, &params);
+ ret = dwc3_send_clear_stall_ep_cmd(dep);
WARN_ON_ONCE(ret);
}
}
}
break;
}
- req->length = value;
- req->context = cdev;
- req->zero = value < w_length;
- value = composite_ep0_queue(cdev, req, GFP_ATOMIC);
- if (value < 0) {
- DBG(cdev, "ep_queue --> %d\n", value);
- req->status = 0;
- composite_setup_complete(gadget->ep0, req);
+
+ if (value >= 0) {
+ req->length = value;
+ req->context = cdev;
+ req->zero = value < w_length;
+ value = composite_ep0_queue(cdev, req,
+ GFP_ATOMIC);
+ if (value < 0) {
+ DBG(cdev, "ep_queue --> %d\n", value);
+ req->status = 0;
+ composite_setup_complete(gadget->ep0,
+ req);
+ }
}
return value;
}
.owner = THIS_MODULE,
.name = "configfs-gadget",
},
+ .match_existing_only = 1,
};
static struct config_group *gadgets_make(
if (len < sizeof(*d) ||
d->bFirstInterfaceNumber >= ffs->interfaces_count ||
- d->Reserved1)
+ !d->Reserved1)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
if (d->Reserved2[i])
func->ffs->ss_descs_count;
int fs_len, hs_len, ss_len, ret, i;
+ struct ffs_ep *eps_ptr;
/* Make it a single chunk, less management later on */
vla_group(d);
ffs->raw_descs_length);
memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
- for (ret = ffs->eps_count; ret; --ret) {
- struct ffs_ep *ptr;
-
- ptr = vla_ptr(vlabuf, d, eps);
- ptr[ret].num = -1;
- }
+ eps_ptr = vla_ptr(vlabuf, d, eps);
+ for (i = 0; i < ffs->eps_count; i++)
+ eps_ptr[i].num = -1;
/* Save pointers
* d_eps == vlabuf, func->eps used to kfree vlabuf later
goto error;
func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
- if (c->cdev->use_os_string)
+ if (c->cdev->use_os_string) {
for (i = 0; i < ffs->interfaces_count; ++i) {
struct usb_os_desc *desc;
vla_ptr(vlabuf, d, ext_compat) + i * 16;
INIT_LIST_HEAD(&desc->ext_prop);
}
- ret = ffs_do_os_descs(ffs->ms_os_descs_count,
- vla_ptr(vlabuf, d, raw_descs) +
- fs_len + hs_len + ss_len,
- d_raw_descs__sz - fs_len - hs_len - ss_len,
- __ffs_func_bind_do_os_desc, func);
- if (unlikely(ret < 0))
- goto error;
+ ret = ffs_do_os_descs(ffs->ms_os_descs_count,
+ vla_ptr(vlabuf, d, raw_descs) +
+ fs_len + hs_len + ss_len,
+ d_raw_descs__sz - fs_len - hs_len -
+ ss_len,
+ __ffs_func_bind_do_os_desc, func);
+ if (unlikely(ret < 0))
+ goto error;
+ }
func->function.os_desc_n =
c->cdev->use_os_string ? ffs->interfaces_count : 0;
.wMaxPacketSize = cpu_to_le16(512)
};
-static struct usb_qualifier_descriptor dev_qualifier = {
- .bLength = sizeof(dev_qualifier),
- .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
- .bcdUSB = cpu_to_le16(0x0200),
- .bDeviceClass = USB_CLASS_PRINTER,
- .bNumConfigurations = 1
-};
-
static struct usb_descriptor_header *hs_printer_function[] = {
(struct usb_descriptor_header *) &intf_desc,
(struct usb_descriptor_header *) &hs_ep_in_desc,
for (i = 0; i < TPG_INSTANCES; ++i)
if (tpg_instances[i].tpg == tpg)
break;
- if (i < TPG_INSTANCES)
+ if (i < TPG_INSTANCES) {
tpg_instances[i].tpg = NULL;
- opts = container_of(tpg_instances[i].func_inst,
- struct f_tcm_opts, func_inst);
- mutex_lock(&opts->dep_lock);
- if (opts->has_dep)
- module_put(opts->dependent);
- else
- configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item);
- mutex_unlock(&opts->dep_lock);
+ opts = container_of(tpg_instances[i].func_inst,
+ struct f_tcm_opts, func_inst);
+ mutex_lock(&opts->dep_lock);
+ if (opts->has_dep)
+ module_put(opts->dependent);
+ else
+ configfs_undepend_item_unlocked(
+ &opts->func_inst.group.cg_item);
+ mutex_unlock(&opts->dep_lock);
+ }
mutex_unlock(&tpg_instances_lock);
kfree(tpg);
NULL,
};
-static struct usb_qualifier_descriptor devqual_desc = {
- .bLength = sizeof devqual_desc,
- .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
-
- .bcdUSB = cpu_to_le16(0x200),
- .bDeviceClass = USB_CLASS_MISC,
- .bDeviceSubClass = 0x02,
- .bDeviceProtocol = 0x01,
- .bNumConfigurations = 1,
- .bRESERVED = 0,
-};
-
static struct usb_interface_assoc_descriptor iad_desc = {
.bLength = sizeof iad_desc,
.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
struct cntrl_cur_lay3 c;
+ memset(&c, 0, sizeof(struct cntrl_cur_lay3));
if (entity_id == USB_IN_CLK_ID)
c.dCUR = p_srate;
* USB 2.0 devices need to expose both high speed and full speed
* descriptors, unless they only run at full speed.
*
- * That means alternate endpoint descriptors (bigger packets)
- * and a "device qualifier" ... plus more construction options
- * for the configuration descriptor.
+ * That means alternate endpoint descriptors (bigger packets).
*/
struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
struct usb_ep *ep = dev->gadget->ep0;
struct usb_request *req = dev->req;
- if ((retval = setup_req (ep, req, 0)) == 0)
- retval = usb_ep_queue (ep, req, GFP_ATOMIC);
+ if ((retval = setup_req (ep, req, 0)) == 0) {
+ spin_unlock_irq (&dev->lock);
+ retval = usb_ep_queue (ep, req, GFP_KERNEL);
+ spin_lock_irq (&dev->lock);
+ }
dev->state = STATE_DEV_CONNECTED;
/* assume that was SET_CONFIGURATION */
w_length);
if (value < 0)
break;
+
+ spin_unlock (&dev->lock);
value = usb_ep_queue (gadget->ep0, dev->req,
- GFP_ATOMIC);
+ GFP_KERNEL);
+ spin_lock (&dev->lock);
if (value < 0) {
clean_req (gadget->ep0, dev->req);
break;
if (value >= 0 && dev->state != STATE_DEV_SETUP) {
req->length = value;
req->zero = value < w_length;
- value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
+
+ spin_unlock (&dev->lock);
+ value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
if (value < 0) {
DBG (dev, "ep_queue --> %d\n", value);
req->status = 0;
}
+ return value;
}
/* device stalls when value < 0 */
}
}
- list_add_tail(&driver->pending, &gadget_driver_pending_list);
- pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
- driver->function);
+ if (!driver->match_existing_only) {
+ list_add_tail(&driver->pending, &gadget_driver_pending_list);
+ pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
+ driver->function);
+ ret = 0;
+ }
+
mutex_unlock(&udc_lock);
- return 0;
+ return ret;
found:
ret = udc_bind_to_driver(udc, driver);
mutex_unlock(&udc_lock);
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	/*
+	 * Protect the system from crashing at system shutdown in cases where
+	 * the USB host has not yet been added by the OTG controller driver.
+	 * As ehci_setup() has not run yet, avoid accessing registers or
+	 * variables initialized in ehci_setup().
+	 */
+ if (!ehci->sbrn)
+ return;
+
spin_lock_irq(&ehci->lock);
ehci->shutdown = true;
ehci->rh_state = EHCI_RH_STOPPING;
) {
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int ports = HCS_N_PORTS (ehci->hcs_params);
- u32 __iomem *status_reg = &ehci->regs->port_status[
- (wIndex & 0xff) - 1];
- u32 __iomem *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1];
+ u32 __iomem *status_reg, *hostpc_reg;
u32 temp, temp1, status;
unsigned long flags;
int retval = 0;
unsigned selector;
+ /*
+ * Avoid underflow while calculating (wIndex & 0xff) - 1.
+ * The compiler might deduce that wIndex can never be 0 and then
+ * optimize away the tests for !wIndex below.
+ */
+ temp = wIndex & 0xff;
+ temp -= (temp > 0);
+ status_reg = &ehci->regs->port_status[temp];
+ hostpc_reg = &ehci->regs->hostpc[temp];
+
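	/*
	 * Worked example of the guard above (illustration, not part of the
	 * patch):
	 *   wIndex = 0 -> temp = 0 (no subtraction, so no wrap to 0xff)
	 *   wIndex = 1 -> temp = 0 (port 1 maps to array index 0)
	 *   wIndex = 2 -> temp = 1
	 */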
/*
* FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
* HCS_INDICATOR may say we can change LEDs to off/amber/green.
static int ehci_msm_pm_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
bool do_wakeup = device_may_wakeup(dev);
dev_dbg(dev, "ehci-msm PM suspend\n");
- return ehci_suspend(hcd, do_wakeup);
+ /* Only call ehci_suspend if ehci_setup has been done */
+ if (ehci->sbrn)
+ return ehci_suspend(hcd, do_wakeup);
+
+ return 0;
}
static int ehci_msm_pm_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
dev_dbg(dev, "ehci-msm PM resume\n");
- ehci_resume(hcd, false);
+
+ /* Only call ehci_resume if ehci_setup has been done */
+ if (ehci->sbrn)
+ ehci_resume(hcd, false);
return 0;
}
+
#else
#define ehci_msm_pm_suspend NULL
#define ehci_msm_pm_resume NULL
priv->clk48 = NULL;
}
- priv->pwr = devm_reset_control_get_optional(&dev->dev, "power");
+ priv->pwr =
+ devm_reset_control_get_optional_shared(&dev->dev, "power");
if (IS_ERR(priv->pwr)) {
err = PTR_ERR(priv->pwr);
if (err == -EPROBE_DEFER)
priv->pwr = NULL;
}
- priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset");
+ priv->rst =
+ devm_reset_control_get_optional_shared(&dev->dev, "softreset");
if (IS_ERR(priv->rst)) {
err = PTR_ERR(priv->rst);
if (err == -EPROBE_DEFER)
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct tegra_ehci_hcd *tegra =
(struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+ bool has_utmi_pad_registers = false;
phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
if (!phy_np)
return -ENOENT;
+ if (of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers"))
+ has_utmi_pad_registers = true;
+
if (!usb1_reset_attempted) {
struct reset_control *usb1_reset;
- usb1_reset = of_reset_control_get(phy_np, "usb");
+ if (!has_utmi_pad_registers)
+ usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
+ else
+ usb1_reset = tegra->rst;
+
if (IS_ERR(usb1_reset)) {
dev_warn(&pdev->dev,
"can't get utmi-pads reset from the PHY\n");
reset_control_assert(usb1_reset);
udelay(1);
reset_control_deassert(usb1_reset);
+
+ if (!has_utmi_pad_registers)
+ reset_control_put(usb1_reset);
}
- reset_control_put(usb1_reset);
usb1_reset_attempted = true;
}
- if (!of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers")) {
+ if (!has_utmi_pad_registers) {
reset_control_assert(tegra->rst);
udelay(1);
reset_control_deassert(tegra->rst);
{
int branch;
- ed->state = ED_OPER;
ed->ed_prev = NULL;
ed->ed_next = NULL;
ed->hwNextED = 0;
/* the HC may not see the schedule updates yet, but if it does
* then they'll be properly ordered.
*/
+
+ ed->state = ED_OPER;
return 0;
}
priv->clk48 = NULL;
}
- priv->pwr = devm_reset_control_get_optional(&dev->dev, "power");
+ priv->pwr =
+ devm_reset_control_get_optional_shared(&dev->dev, "power");
if (IS_ERR(priv->pwr)) {
err = PTR_ERR(priv->pwr);
goto err_put_clks;
}
- priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset");
+ priv->rst =
+ devm_reset_control_get_optional_shared(&dev->dev, "softreset");
if (IS_ERR(priv->rst)) {
err = PTR_ERR(priv->rst);
goto err_put_clks;
/* Device for a quirk */
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
+#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009
#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
#define PCI_VENDOR_ID_ETRON 0x1b6f
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
+ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
+
if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST;
ret = clk_prepare_enable(clk);
if (ret)
goto put_hcd;
+ } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto put_hcd;
}
xhci = hcd_to_xhci(hcd);
temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+
+ /*
+ * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
+ * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
+	 * but the completion event is never sent. Use the cmd timeout timer to
+	 * handle those cases. Use twice the time to cover the bit polling retry.
+ */
+ mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
&xhci->op_regs->cmd_ring);
xhci_err(xhci, "Stopped the command ring failed, "
"maybe the host is dead\n");
+ del_timer(&xhci->cmd_timer);
xhci->xhc_state |= XHCI_STATE_DYING;
xhci_quiesce(xhci);
xhci_halt(xhci);
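
The hunk above arms xhci->cmd_timer before setting CMD_RING_ABORT so that a host which clears CMD_RING_RUNNING but never delivers the completion event is still recovered by the timeout path. A minimal sketch of the same watchdog-around-a-hardware-handshake pattern, using the classic timer API of this kernel generation; the function names are hypothetical and 5000 ms merely stands in for XHCI_CMD_DEFAULT_TIMEOUT:

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	static struct timer_list cmd_watchdog;

	static void cmd_watchdog_fire(unsigned long data)
	{
		/* the completion event never arrived: clean up the queue */
	}

	static void demo_init(void)
	{
		setup_timer(&cmd_watchdog, cmd_watchdog_fire, 0);
	}

	static void demo_issue_abort(void)
	{
		/* twice the nominal timeout, covering bit-polling retries */
		mod_timer(&cmd_watchdog, jiffies + 2 * msecs_to_jiffies(5000));
		/* ... write the abort bit to the hardware here ... */
	}

	static void demo_abort_completed(void)
	{
		del_timer(&cmd_watchdog);
	}
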
int ret;
unsigned long flags;
u64 hw_ring_state;
- struct xhci_command *cur_cmd = NULL;
+ bool second_timeout = false;
xhci = (struct xhci_hcd *) data;
/* mark this command to be cancelled */
spin_lock_irqsave(&xhci->lock, flags);
if (xhci->current_cmd) {
- cur_cmd = xhci->current_cmd;
- cur_cmd->status = COMP_CMD_ABORT;
+ if (xhci->current_cmd->status == COMP_CMD_ABORT)
+ second_timeout = true;
+ xhci->current_cmd->status = COMP_CMD_ABORT;
}
-
/* Make sure command ring is running before aborting it */
hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
(hw_ring_state & CMD_RING_RUNNING)) {
-
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Command timeout\n");
ret = xhci_abort_cmd_ring(xhci);
}
return;
}
+
+ /* command ring failed to restart, or host removed. Bail out */
+ if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
+ xhci_cleanup_command_queue(xhci);
+ return;
+ }
+
/* command timeout on stopped ring, ring can't be aborted */
xhci_dbg(xhci, "Command timeout on stopped ring\n");
xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
writel(irq_pending, &xhci->ir_set->irq_pending);
}
- if (xhci->xhc_state & XHCI_STATE_DYING) {
+ if (xhci->xhc_state & XHCI_STATE_DYING ||
+ xhci->xhc_state & XHCI_STATE_HALTED) {
xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
"Shouldn't IRQs be disabled?\n");
/* Clear the event handler busy flag (RW1C);
u32 temp;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- if (xhci->xhc_state & XHCI_STATE_HALTED)
- return;
-
mutex_lock(&xhci->mutex);
- spin_lock_irq(&xhci->lock);
- xhci->xhc_state |= XHCI_STATE_HALTED;
- xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
- /* Make sure the xHC is halted for a USB3 roothub
- * (xhci_stop() could be called as part of failed init).
- */
- xhci_halt(xhci);
- xhci_reset(xhci);
- spin_unlock_irq(&xhci->lock);
+ if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
+ spin_lock_irq(&xhci->lock);
+
+ xhci->xhc_state |= XHCI_STATE_HALTED;
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+ xhci_halt(xhci);
+ xhci_reset(xhci);
+
+ spin_unlock_irq(&xhci->lock);
+ }
+
+ if (!usb_hcd_is_primary_hcd(hcd)) {
+ mutex_unlock(&xhci->mutex);
+ return;
+ }
xhci_cleanup_msix(xhci);
xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
xhci_print_registers(xhci);
- xhci->quirks = quirks;
+ xhci->quirks |= quirks;
get_quirks(dev, xhci);
musb_platform_try_idle(musb, 0);
}
-static void musb_shutdown(struct platform_device *pdev)
-{
- struct musb *musb = dev_to_musb(&pdev->dev);
- unsigned long flags;
-
- pm_runtime_get_sync(musb->controller);
-
- musb_host_cleanup(musb);
- musb_gadget_cleanup(musb);
-
- spin_lock_irqsave(&musb->lock, flags);
- musb_platform_disable(musb);
- musb_generic_disable(musb);
- spin_unlock_irqrestore(&musb->lock, flags);
-
- musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
- musb_platform_exit(musb);
-
- pm_runtime_put(musb->controller);
- /* FIXME power down */
-}
-
-
/*-------------------------------------------------------------------------*/
/*
#define use_dma 0
#endif
-static void (*musb_phy_callback)(enum musb_vbus_id_status status);
+static int (*musb_phy_callback)(enum musb_vbus_id_status status);
/*
* musb_mailbox - optional phy notifier function
* Optionally gets called from the USB PHY. Note that the USB PHY must be
* disabled at the point the phy_callback is registered or unregistered.
*/
-void musb_mailbox(enum musb_vbus_id_status status)
+int musb_mailbox(enum musb_vbus_id_status status)
{
if (musb_phy_callback)
- musb_phy_callback(status);
+ return musb_phy_callback(status);
+ return -ENODEV;
};
EXPORT_SYMBOL_GPL(musb_mailbox);
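
With the void-to-int change above, musb_mailbox() can tell the PHY driver that no consumer is registered yet instead of silently dropping the event; the twl6030 hunks later in this series use the error to fall back to MUSB_UNKNOWN. A minimal sketch of the pattern, with hypothetical names:

	#include <linux/errno.h>
	#include <linux/export.h>

	/* hypothetical callback, registered by the glue layer */
	static int (*demo_phy_callback)(int status);

	int demo_mailbox(int status)
	{
		if (demo_phy_callback)
			return demo_phy_callback(status);
		/* consumer not ready: caller can keep its state and retry */
		return -ENODEV;
	}
	EXPORT_SYMBOL_GPL(demo_mailbox);
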
musb_readl = musb_default_readl;
musb_writel = musb_default_writel;
- /* We need musb_read/write functions initialized for PM */
- pm_runtime_use_autosuspend(musb->controller);
- pm_runtime_set_autosuspend_delay(musb->controller, 200);
- pm_runtime_enable(musb->controller);
-
/* The musb_platform_init() call:
* - adjusts musb->mregs
* - sets the musb->isr
if (musb->ops->phy_callback)
musb_phy_callback = musb->ops->phy_callback;
+ /*
+ * We need musb_read/write functions initialized for PM.
+ * Note that at least the 2430 glue needs an autosuspend delay
+ * somewhere above 300 ms for the hardware to idle properly
+ * after disconnecting the cable in host mode. Let's use
+ * 500 ms for some margin.
+ */
+ pm_runtime_use_autosuspend(musb->controller);
+ pm_runtime_set_autosuspend_delay(musb->controller, 500);
+ pm_runtime_enable(musb->controller);
pm_runtime_get_sync(musb->controller);
status = usb_phy_init(musb->xceiv);
if (status)
goto fail5;
- pm_runtime_put(musb->controller);
-
- /*
- * For why this is currently needed, see commit 3e43a0725637
- * ("usb: musb: core: add pm_runtime_irq_safe()")
- */
- pm_runtime_irq_safe(musb->controller);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return 0;
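
Note the pairing the probe path now uses: the autosuspend knobs are configured before the first pm_runtime_get_sync(), and the final put goes through mark-last-busy/put-autosuspend so the 500 ms delay takes effect. A minimal sketch of this runtime-PM idiom, assuming only a generic struct device:

	#include <linux/pm_runtime.h>

	static void demo_runtime_pm_setup(struct device *dev)
	{
		pm_runtime_use_autosuspend(dev);
		pm_runtime_set_autosuspend_delay(dev, 500);	/* ms */
		pm_runtime_enable(dev);
	}

	static void demo_io_done(struct device *dev)
	{
		/* restart the autosuspend clock, then drop our reference */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}
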
usb_phy_shutdown(musb->xceiv);
err_usb_phy_init:
+ pm_runtime_dont_use_autosuspend(musb->controller);
pm_runtime_put_sync(musb->controller);
+ pm_runtime_disable(musb->controller);
fail2:
if (musb->irq_wake)
musb_platform_exit(musb);
fail1:
- pm_runtime_disable(musb->controller);
dev_err(musb->controller,
"musb_init_controller failed with status %d\n", status);
{
struct device *dev = &pdev->dev;
struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
/* this gets called on rmmod.
* - Host mode: host may still be active
* - OTG mode: both roles are deactivated (or never-activated)
*/
musb_exit_debugfs(musb);
- musb_shutdown(pdev);
- musb_phy_callback = NULL;
-
- if (musb->dma_controller)
- musb_dma_controller_destroy(musb->dma_controller);
-
- usb_phy_shutdown(musb->xceiv);
cancel_work_sync(&musb->irq_work);
cancel_delayed_work_sync(&musb->finish_resume_work);
cancel_delayed_work_sync(&musb->deassert_reset_work);
+ pm_runtime_get_sync(musb->controller);
+ musb_host_cleanup(musb);
+ musb_gadget_cleanup(musb);
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ pm_runtime_dont_use_autosuspend(musb->controller);
+ pm_runtime_put_sync(musb->controller);
+ pm_runtime_disable(musb->controller);
+ musb_platform_exit(musb);
+ musb_phy_callback = NULL;
+ if (musb->dma_controller)
+ musb_dma_controller_destroy(musb->dma_controller);
+ usb_phy_shutdown(musb->xceiv);
musb_free(musb);
device_init_wakeup(dev, 0);
return 0;
musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
- musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
+ if (musb->context.devctl & MUSB_DEVCTL_SESSION)
+ musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
for (i = 0; i < musb->config->num_eps; ++i) {
struct musb_hw_ep *hw_ep;
},
.probe = musb_probe,
.remove = musb_remove,
- .shutdown = musb_shutdown,
};
module_platform_driver(musb_driver);
dma_addr_t *dma_addr, u32 *len);
void (*pre_root_reset_end)(struct musb *musb);
void (*post_root_reset_end)(struct musb *musb);
- void (*phy_callback)(enum musb_vbus_id_status status);
+ int (*phy_callback)(enum musb_vbus_id_status status);
};
/*
struct work_struct irq_work;
struct delayed_work deassert_reset_work;
struct delayed_work finish_resume_work;
+ struct delayed_work gadget_work;
u16 hwvers;
u16 intrrxe;
return usb_phy_set_power(musb->xceiv, mA);
}
+static void musb_gadget_work(struct work_struct *work)
+{
+ struct musb *musb;
+ unsigned long flags;
+
+ musb = container_of(work, struct musb, gadget_work.work);
+ pm_runtime_get_sync(musb->controller);
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_pullup(musb, musb->softconnect);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+}
+
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
struct musb *musb = gadget_to_musb(gadget);
is_on = !!is_on;
- pm_runtime_get_sync(musb->controller);
-
/* NOTE: this assumes we are sensing vbus; we'd rather
* not pullup unless the B-session is active.
*/
spin_lock_irqsave(&musb->lock, flags);
if (is_on != musb->softconnect) {
musb->softconnect = is_on;
- musb_pullup(musb, is_on);
+ schedule_delayed_work(&musb->gadget_work, 0);
}
spin_unlock_irqrestore(&musb->lock, flags);
- pm_runtime_put(musb->controller);
-
return 0;
}
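
musb_gadget_pullup() can be entered in atomic context, so the hunk above defers the actual pullup (and its sleeping pm_runtime_get_sync()) to gadget_work. A minimal sketch of that defer-to-workqueue pattern, with hypothetical names:

	#include <linux/workqueue.h>

	struct demo {
		struct delayed_work work;
	};

	static void demo_work_fn(struct work_struct *w)
	{
		struct demo *d = container_of(w, struct demo, work.work);

		/* process context: sleeping calls are safe here */
		(void)d;
	}

	static void demo_init(struct demo *d)
	{
		INIT_DELAYED_WORK(&d->work, demo_work_fn);
	}

	static void demo_kick_from_atomic(struct demo *d)
	{
		/* zero delay: run as soon as a worker picks it up */
		schedule_delayed_work(&d->work, 0);
	}
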
#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
musb->g.is_otg = 0;
#endif
-
+ INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
musb_g_init_endpoints(musb);
musb->is_active = 0;
{
if (musb->port_mode == MUSB_PORT_MODE_HOST)
return;
+
+ cancel_delayed_work_sync(&musb->gadget_work);
usb_del_gadget_udc(&musb->g);
}
if (musb->xceiv->last_event == USB_EVENT_ID)
musb_platform_set_vbus(musb, 1);
- if (musb->xceiv->last_event == USB_EVENT_NONE)
- pm_runtime_put(musb->controller);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return 0;
struct musb *musb = gadget_to_musb(g);
unsigned long flags;
- if (musb->xceiv->last_event == USB_EVENT_NONE)
- pm_runtime_get_sync(musb->controller);
+ pm_runtime_get_sync(musb->controller);
/*
* REVISIT always use otg_set_peripheral() here too;
* that currently misbehaves.
*/
- pm_runtime_put(musb->controller);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return 0;
}
}
}
- if (qh != NULL && qh->is_ready) {
+ /*
+ * The pipe must be broken if the current urb->status is set, so don't
+ * start the next urb.
+ * TODO: to minimize the risk of regression, only check urb->status
+ * for RX, until we have a test case to understand the behavior of TX.
+ */
+ if ((!status || !is_in) && qh && qh->is_ready) {
dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
musb_writew(ep->regs, MUSB_TXCSR, 0);
/* scrub all previous state, clearing toggle */
- } else {
- csr = musb_readw(ep->regs, MUSB_RXCSR);
- if (csr & MUSB_RXCSR_RXPKTRDY)
- WARNING("rx%d, packet/%d ready?\n", ep->epnum,
- musb_readw(ep->regs, MUSB_RXCOUNT));
-
- musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
}
+ csr = musb_readw(ep->regs, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_RXPKTRDY)
+ WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+ musb_readw(ep->regs, MUSB_RXCOUNT));
+
+ musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
/* target addr and (for multipoint) hub addr/port */
if (musb->is_multipoint) {
ep->rx_reinit = 0;
}
-static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
+static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
struct musb_hw_ep *hw_ep, struct musb_qh *qh,
struct urb *urb, u32 offset,
u32 *length, u8 *mode)
}
channel->desired_mode = *mode;
musb_writew(epio, MUSB_TXCSR, csr);
-
- return 0;
}
-static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
- struct musb_hw_ep *hw_ep,
- struct musb_qh *qh,
- struct urb *urb,
- u32 offset,
- u32 *length,
- u8 *mode)
+static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ u32 offset,
+ u32 *length,
+ u8 *mode)
{
struct dma_channel *channel = hw_ep->tx_channel;
- if (!is_cppi_enabled(hw_ep->musb) && !tusb_dma_omap(hw_ep->musb))
- return -ENODEV;
-
channel->actual_len = 0;
/*
* to identify the zero-length-final-packet case.
*/
*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
-
- return 0;
}
static bool musb_tx_dma_program(struct dma_controller *dma,
struct dma_channel *channel = hw_ep->tx_channel;
u16 pkt_size = qh->maxpacket;
u8 mode;
- int res;
if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
- res = musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb,
- offset, &length, &mode);
+ musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
+ &length, &mode);
+ else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
+ musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
+ &length, &mode);
else
- res = musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb,
- offset, &length, &mode);
- if (res)
return false;
qh->segsize = length;
if (is_in) {
dma = is_dma_capable() ? ep->rx_channel : NULL;
- /* clear nak timeout bit */
+ /*
+ * Need to stop the transaction by clearing REQPKT first,
+ * then the NAK Timeout bit; see the MUSBMHDRC USB 2.0 HIGH-SPEED
+ * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2.
+ */
rx_csr = musb_readw(epio, MUSB_RXCSR);
rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+ rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
rx_csr &= ~MUSB_RXCSR_DATAERROR;
musb_writew(epio, MUSB_RXCSR, rx_csr);
struct urb *urb,
size_t len)
{
- struct dma_channel *channel = hw_ep->tx_channel;
+ struct dma_channel *channel = hw_ep->rx_channel;
void __iomem *epio = hw_ep->regs;
dma_addr_t *buf;
u32 length, res;
status = -EPROTO;
musb_writeb(epio, MUSB_RXINTERVAL, 0);
+ rx_csr &= ~MUSB_RXCSR_H_ERROR;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
+
} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
if (USB_ENDPOINT_XFER_ISOC != qh->type) {
enum musb_vbus_id_status status;
struct work_struct omap_musb_mailbox_work;
struct device *control_otghs;
+ bool cable_connected;
+ bool enabled;
+ bool powered;
};
#define glue_to_musb(g) platform_get_drvdata(g->musb)
static struct omap2430_glue *_glue;
-static struct timer_list musb_idle_timer;
-
-static void musb_do_idle(unsigned long _musb)
-{
- struct musb *musb = (void *)_musb;
- unsigned long flags;
- u8 power;
- u8 devctl;
-
- spin_lock_irqsave(&musb->lock, flags);
-
- switch (musb->xceiv->otg->state) {
- case OTG_STATE_A_WAIT_BCON:
-
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- if (devctl & MUSB_DEVCTL_BDEVICE) {
- musb->xceiv->otg->state = OTG_STATE_B_IDLE;
- MUSB_DEV_MODE(musb);
- } else {
- musb->xceiv->otg->state = OTG_STATE_A_IDLE;
- MUSB_HST_MODE(musb);
- }
- break;
- case OTG_STATE_A_SUSPEND:
- /* finish RESUME signaling? */
- if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
- power = musb_readb(musb->mregs, MUSB_POWER);
- power &= ~MUSB_POWER_RESUME;
- dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power);
- musb_writeb(musb->mregs, MUSB_POWER, power);
- musb->is_active = 1;
- musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
- | MUSB_PORT_STAT_RESUME);
- musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
- usb_hcd_poll_rh_status(musb->hcd);
- /* NOTE: it might really be A_WAIT_BCON ... */
- musb->xceiv->otg->state = OTG_STATE_A_HOST;
- }
- break;
- case OTG_STATE_A_HOST:
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- if (devctl & MUSB_DEVCTL_BDEVICE)
- musb->xceiv->otg->state = OTG_STATE_B_IDLE;
- else
- musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
- default:
- break;
- }
- spin_unlock_irqrestore(&musb->lock, flags);
-}
-
-
-static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
-{
- unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
- static unsigned long last_timer;
-
- if (timeout == 0)
- timeout = default_timeout;
-
- /* Never idle if active, or when VBUS timeout is not set as host */
- if (musb->is_active || ((musb->a_wait_bcon == 0)
- && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
- dev_dbg(musb->controller, "%s active, deleting timer\n",
- usb_otg_state_string(musb->xceiv->otg->state));
- del_timer(&musb_idle_timer);
- last_timer = jiffies;
- return;
- }
-
- if (time_after(last_timer, timeout)) {
- if (!timer_pending(&musb_idle_timer))
- last_timer = timeout;
- else {
- dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
- return;
- }
- }
- last_timer = timeout;
-
- dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
- usb_otg_state_string(musb->xceiv->otg->state),
- (unsigned long)jiffies_to_msecs(timeout - jiffies));
- mod_timer(&musb_idle_timer, timeout);
-}
-
static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
{
struct usb_otg *otg = musb->xceiv->otg;
musb_readb(musb->mregs, MUSB_DEVCTL));
}
-static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode)
-{
- u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
- devctl |= MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
-
- return 0;
-}
-
static inline void omap2430_low_level_exit(struct musb *musb)
{
u32 l;
musb_writel(musb->mregs, OTG_FORCESTDBY, l);
}
-static void omap2430_musb_mailbox(enum musb_vbus_id_status status)
+/*
+ * We can get multiple cable events, so we need to keep track
+ * of the power state. Only keep power enabled if the USB cable
+ * is connected and a gadget is started.
+ */
+static void omap2430_set_power(struct musb *musb, bool enabled, bool cable)
+{
+ struct device *dev = musb->controller;
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+ bool power_up;
+ int res;
+
+ if (glue->enabled != enabled)
+ glue->enabled = enabled;
+
+ if (glue->cable_connected != cable)
+ glue->cable_connected = cable;
+
+ power_up = glue->enabled && glue->cable_connected;
+ if (power_up == glue->powered) {
+ dev_warn(musb->controller, "power state already %i\n",
+ power_up);
+ return;
+ }
+
+ glue->powered = power_up;
+
+ if (power_up) {
+ res = pm_runtime_get_sync(musb->controller);
+ if (res < 0) {
+ dev_err(musb->controller, "could not enable: %i\n", res);
+ glue->powered = false;
+ }
+ } else {
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ }
+}
+
+static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
{
struct omap2430_glue *glue = _glue;
if (!glue) {
pr_err("%s: musb core is not yet initialized\n", __func__);
- return;
+ return -EPROBE_DEFER;
}
glue->status = status;
if (!glue_to_musb(glue)) {
pr_err("%s: musb core is not yet ready\n", __func__);
- return;
+ return -EPROBE_DEFER;
}
schedule_work(&glue->omap_musb_mailbox_work);
+
+ return 0;
}
static void omap_musb_set_mailbox(struct omap2430_glue *glue)
struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
struct omap_musb_board_data *data = pdata->board_data;
struct usb_otg *otg = musb->xceiv->otg;
+ bool cable_connected;
+
+ cable_connected = ((glue->status == MUSB_ID_GROUND) ||
+ (glue->status == MUSB_VBUS_VALID));
+
+ if (cable_connected)
+ omap2430_set_power(musb, glue->enabled, cable_connected);
switch (glue->status) {
case MUSB_ID_GROUND:
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
musb->xceiv->last_event = USB_EVENT_ID;
if (musb->gadget_driver) {
- pm_runtime_get_sync(dev);
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_HOST);
omap2430_musb_set_vbus(musb, 1);
otg->default_a = false;
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
musb->xceiv->last_event = USB_EVENT_VBUS;
- if (musb->gadget_driver)
- pm_runtime_get_sync(dev);
omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
break;
dev_dbg(dev, "VBUS Disconnect\n");
musb->xceiv->last_event = USB_EVENT_NONE;
- if (musb->gadget_driver) {
+ if (musb->gadget_driver)
omap2430_musb_set_vbus(musb, 0);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- }
if (data->interface_type == MUSB_INTERFACE_UTMI)
otg_set_vbus(musb->xceiv->otg, 0);
dev_dbg(dev, "ID float\n");
}
+ if (!cable_connected)
+ omap2430_set_power(musb, glue->enabled, cable_connected);
+
atomic_notifier_call_chain(&musb->xceiv->notifier,
musb->xceiv->last_event, NULL);
}
{
struct omap2430_glue *glue = container_of(mailbox_work,
struct omap2430_glue, omap_musb_mailbox_work);
- struct musb *musb = glue_to_musb(glue);
- struct device *dev = musb->controller;
- pm_runtime_get_sync(dev);
omap_musb_set_mailbox(glue);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
}
static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
return PTR_ERR(musb->phy);
}
musb->isr = omap2430_musb_interrupt;
-
- /*
- * Enable runtime PM for musb parent (this driver). We can't
- * do it earlier as struct musb is not yet allocated and we
- * need to touch the musb registers for runtime PM.
- */
- pm_runtime_enable(glue->dev);
- status = pm_runtime_get_sync(glue->dev);
- if (status < 0)
- goto err1;
-
- status = pm_runtime_get_sync(dev);
- if (status < 0) {
- dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
- pm_runtime_put_sync(glue->dev);
- goto err1;
- }
+ phy_init(musb->phy);
l = musb_readl(musb->mregs, OTG_INTERFSEL);
musb_readl(musb->mregs, OTG_INTERFSEL),
musb_readl(musb->mregs, OTG_SIMENABLE));
- setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
-
if (glue->status != MUSB_UNKNOWN)
omap_musb_set_mailbox(glue);
- phy_init(musb->phy);
- phy_power_on(musb->phy);
-
- pm_runtime_put_noidle(musb->controller);
- pm_runtime_put_noidle(glue->dev);
return 0;
-
-err1:
- return status;
}
static void omap2430_musb_enable(struct musb *musb)
struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
struct omap_musb_board_data *data = pdata->board_data;
+ if (!WARN_ON(!musb->phy))
+ phy_power_on(musb->phy);
+
+ omap2430_set_power(musb, true, glue->cable_connected);
+
switch (glue->status) {
case MUSB_ID_GROUND:
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+ if (!WARN_ON(!musb->phy))
+ phy_power_off(musb->phy);
+
if (glue->status != MUSB_UNKNOWN)
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
+
+ omap2430_set_power(musb, false, glue->cable_connected);
}
static int omap2430_musb_exit(struct musb *musb)
{
- del_timer_sync(&musb_idle_timer);
+ struct device *dev = musb->controller;
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
omap2430_low_level_exit(musb);
- phy_power_off(musb->phy);
phy_exit(musb->phy);
+ musb->phy = NULL;
+ cancel_work_sync(&glue->omap_musb_mailbox_work);
return 0;
}
.init = omap2430_musb_init,
.exit = omap2430_musb_exit,
- .set_mode = omap2430_musb_set_mode,
- .try_idle = omap2430_musb_try_idle,
-
.set_vbus = omap2430_musb_set_vbus,
.enable = omap2430_musb_enable,
goto err2;
}
- /*
- * Note that we cannot enable PM runtime yet for this
- * driver as we need struct musb initialized first.
- * See omap2430_musb_init above.
- */
+ pm_runtime_enable(glue->dev);
+ pm_runtime_use_autosuspend(glue->dev);
+ pm_runtime_set_autosuspend_delay(glue->dev, 500);
ret = platform_device_add(musb);
if (ret) {
static int omap2430_remove(struct platform_device *pdev)
{
- struct omap2430_glue *glue = platform_get_drvdata(pdev);
+ struct omap2430_glue *glue = platform_get_drvdata(pdev);
+ struct musb *musb = glue_to_musb(glue);
pm_runtime_get_sync(glue->dev);
- cancel_work_sync(&glue->omap_musb_mailbox_work);
platform_device_unregister(glue->musb);
+ omap2430_set_power(musb, false, false);
pm_runtime_put_sync(glue->dev);
+ pm_runtime_dont_use_autosuspend(glue->dev);
pm_runtime_disable(glue->dev);
return 0;
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
- if (musb) {
- musb->context.otg_interfsel = musb_readl(musb->mregs,
- OTG_INTERFSEL);
+ if (!musb)
+ return 0;
- omap2430_low_level_exit(musb);
- }
+ musb->context.otg_interfsel = musb_readl(musb->mregs,
+ OTG_INTERFSEL);
+
+ omap2430_low_level_exit(musb);
return 0;
}
struct musb *musb = glue_to_musb(glue);
if (!musb)
- return -EPROBE_DEFER;
+ return 0;
omap2430_low_level_init(musb);
musb_writel(musb->mregs, OTG_INTERFSEL,
},
};
+module_platform_driver(omap2430_driver);
+
MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("GPL v2");
-
-static int __init omap2430_init(void)
-{
- return platform_driver_register(&omap2430_driver);
-}
-subsys_initcall(omap2430_init);
-
-static void __exit omap2430_exit(void)
-{
- platform_driver_unregister(&omap2430_driver);
-}
-module_exit(omap2430_exit);
struct sunxi_glue {
struct device *dev;
- struct platform_device *musb;
+ struct musb *musb;
+ struct platform_device *musb_pdev;
struct clk *clk;
struct reset_control *rst;
struct phy *phy;
return;
if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) {
- struct musb *musb = platform_get_drvdata(glue->musb);
+ struct musb *musb = glue->musb;
unsigned long flags;
u8 devctl;
if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) {
set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
musb->xceiv->otg->default_a = 1;
- musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
MUSB_HST_MODE(musb);
devctl |= MUSB_DEVCTL_SESSION;
} else {
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
- if (is_on)
+ if (is_on) {
set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
- else
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ } else {
clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ }
schedule_work(&glue->work);
}
if (ret)
goto error_unregister_notifier;
- if (musb->port_mode == MUSB_PORT_MODE_HOST) {
- ret = phy_power_on(glue->phy);
- if (ret)
- goto error_phy_exit;
- set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
- /* Stop musb work from turning vbus off again */
- set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
- }
-
musb->isr = sunxi_musb_interrupt;
/* Stop the musb-core from doing runtime pm (not supported on sunxi) */
return 0;
-error_phy_exit:
- phy_exit(glue->phy);
error_unregister_notifier:
if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
return 0;
}
+static int sunxi_set_mode(struct musb *musb, u8 mode)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+ int ret;
+
+ if (mode == MUSB_HOST) {
+ ret = phy_power_on(glue->phy);
+ if (ret)
+ return ret;
+
+ set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+ /* Stop musb work from turning vbus off again */
+ set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ }
+
+ return 0;
+}
+
static void sunxi_musb_enable(struct musb *musb)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+ glue->musb = musb;
+
/* musb_core does not call us in a balanced manner */
if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
return;
.exit = sunxi_musb_exit,
.enable = sunxi_musb_enable,
.disable = sunxi_musb_disable,
+ .set_mode = sunxi_set_mode,
.fifo_offset = sunxi_musb_fifo_offset,
.ep_offset = sunxi_musb_ep_offset,
.busctl_offset = sunxi_musb_busctl_offset,
pinfo.data = &pdata;
pinfo.size_data = sizeof(pdata);
- glue->musb = platform_device_register_full(&pinfo);
- if (IS_ERR(glue->musb)) {
- ret = PTR_ERR(glue->musb);
+ glue->musb_pdev = platform_device_register_full(&pinfo);
+ if (IS_ERR(glue->musb_pdev)) {
+ ret = PTR_ERR(glue->musb_pdev);
dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret);
goto err_unregister_usb_phy;
}
struct sunxi_glue *glue = platform_get_drvdata(pdev);
struct platform_device *usb_phy = glue->usb_phy;
- platform_device_unregister(glue->musb); /* Frees glue ! */
+ platform_device_unregister(glue->musb_pdev);
usb_phy_generic_unregister(usb_phy);
return 0;
struct regulator *usb3v3;
+ /* used to check initial cable status after probe */
+ struct delayed_work get_status_work;
+
/* used to set vbus, in atomic path */
struct work_struct set_vbus_work;
twl->asleep = 1;
status = MUSB_VBUS_VALID;
twl->linkstat = status;
- musb_mailbox(status);
+ ret = musb_mailbox(status);
+ if (ret)
+ twl->linkstat = MUSB_UNKNOWN;
} else {
if (twl->linkstat != MUSB_UNKNOWN) {
status = MUSB_VBUS_OFF;
twl->linkstat = status;
- musb_mailbox(status);
+ ret = musb_mailbox(status);
+ if (ret)
+ twl->linkstat = MUSB_UNKNOWN;
if (twl->asleep) {
regulator_disable(twl->usb3v3);
twl->asleep = 0;
twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
status = MUSB_ID_GROUND;
twl->linkstat = status;
- musb_mailbox(status);
+ ret = musb_mailbox(status);
+ if (ret)
+ twl->linkstat = MUSB_UNKNOWN;
} else {
twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
return IRQ_HANDLED;
}
+static void twl6030_status_work(struct work_struct *work)
+{
+ struct twl6030_usb *twl = container_of(work, struct twl6030_usb,
+ get_status_work.work);
+
+ twl6030_usb_irq(twl->irq2, twl);
+ twl6030_usbotg_irq(twl->irq1, twl);
+}
+
static int twl6030_enable_irq(struct twl6030_usb *twl)
{
twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
REG_INT_MSK_LINE_C);
twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
REG_INT_MSK_STS_C);
- twl6030_usb_irq(twl->irq2, twl);
- twl6030_usbotg_irq(twl->irq1, twl);
return 0;
}
dev_warn(&pdev->dev, "could not create sysfs file\n");
INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);
+ INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work);
status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
twl->asleep = 0;
twl6030_enable_irq(twl);
+ schedule_delayed_work(&twl->get_status_work, HZ);
dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
return 0;
{
struct twl6030_usb *twl = platform_get_drvdata(pdev);
+ cancel_delayed_work(&twl->get_status_work);
twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
REG_INT_MSK_LINE_C);
twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
urblist_entry)
usb_unlink_urb(urbtrack->urb);
spin_unlock_irqrestore(&mos_parport->listlock, flags);
+ parport_del_port(mos_parport->pp);
kref_put(&mos_parport->ref_count, destroy_mos_parport);
}
if (devinfo->flags & US_FL_BROKEN_FUA)
sdev->broken_fua = 1;
+ scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
return 0;
}
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_bus_reset_handler = uas_eh_bus_reset_handler,
- .can_queue = MAX_CMNDS,
.this_id = -1,
.sg_tablesize = SG_NONE,
.skip_settle_delay = 1,
static int vhci_get_frame_number(struct usb_hcd *hcd)
{
- pr_err("Not yet implemented\n");
+ dev_err_ratelimited(&hcd->self.root_hub->dev, "Not yet implemented\n");
return 0;
}
if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
return count;
} else {
- if (pci_read_vpd(pdev, addr, 4, &data) != 4)
+ data = 0;
+ if (pci_read_vpd(pdev, addr, 4, &data) < 0)
return count;
*pdata = cpu_to_le32(data);
}
static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
- vfio_intx_set_signal(vdev, -1);
vfio_virqfd_disable(&vdev->ctx[0].unmask);
vfio_virqfd_disable(&vdev->ctx[0].mask);
+ vfio_intx_set_signal(vdev, -1);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
vdev->num_ctx = 0;
kfree(vdev->ctx);
struct pci_dev *pdev = vdev->pdev;
int i;
- vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
-
for (i = 0; i < vdev->num_ctx; i++) {
vfio_virqfd_disable(&vdev->ctx[i].unmask);
vfio_virqfd_disable(&vdev->ctx[i].mask);
}
+ vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+
if (msix) {
pci_disable_msix(vdev->pdev);
kfree(vdev->msix);
unsigned long pfn, long npage, int prot)
{
long i;
- int ret;
+ int ret = 0;
for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
ret = iommu_map(domain->domain, iova,
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
- const unsigned ss_scl_high = 4000; /* ns */
- const unsigned ss_scl_low = 4700; /* ns */
+ const unsigned ss_scl_high = 4600; /* ns */
+ const unsigned ss_scl_low = 5400; /* ns */
const unsigned fs_scl_high = 600; /* ns */
const unsigned fs_scl_low = 1300; /* ns */
const unsigned sda_hold = 1000; /* ns */
c = (ptr[1] >> 6) & 0x3;
m = (ptr[1] >> 4) & 0x3;
- r = (ptr[1] >> 0) & 0x3;
+ r = (ptr[1] >> 0) & 0xf;
itc = (ptr[2] >> 7) & 0x1;
ec = (ptr[2] >> 4) & 0x7;
config EBC_C384_WDT
tristate "WinSystems EBC-C384 Watchdog Timer"
- depends on X86 && ISA
+ depends on X86 && ISA_BUS_API
select WATCHDOG_CORE
help
Enables watchdog timer support for the watchdog timer on the
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
-static void release_memory_resource(struct resource *resource);
-
/* When ballooning out (allocating memory to return to Xen) we don't really
want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
}
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+static void release_memory_resource(struct resource *resource)
+{
+ if (!resource)
+ return;
+
+ /*
+ * No need to reset region to identity mapped since we now
+ * know that no I/O can be in this region
+ */
+ release_resource(resource);
+ kfree(resource);
+}
+
static struct resource *additional_memory_resource(phys_addr_t size)
{
struct resource *res;
return res;
}
-static void release_memory_resource(struct resource *resource)
-{
- if (!resource)
- return;
-
- /*
- * No need to reset region to identity mapped since we now
- * know that no I/O can be in this region
- */
- release_resource(resource);
- kfree(resource);
-}
-
static enum bp_state reserve_additional_memory(void)
{
long credit;
return 0;
}
-static int __init check_prereq(void)
-{
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- if (!xen_initial_domain())
- return -ENODEV;
-
- if (!acpi_gbl_FADT.smi_command)
- return -ENODEV;
-
- if (c->x86_vendor == X86_VENDOR_INTEL) {
- if (!cpu_has(c, X86_FEATURE_EST))
- return -ENODEV;
- return 0;
- }
- if (c->x86_vendor == X86_VENDOR_AMD) {
- /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
- * as we get compile warnings for the static functions.
- */
-#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
-#define USE_HW_PSTATE 0x00000080
- u32 eax, ebx, ecx, edx;
- cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
- if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
- return -ENODEV;
- return 0;
- }
- return -ENODEV;
-}
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;
static int __init xen_acpi_processor_init(void)
{
unsigned int i;
- int rc = check_prereq();
+ int rc;
- if (rc)
- return rc;
+ if (!xen_initial_domain())
+ return -ENODEV;
nr_acpi_bits = get_max_acpi_id() + 1;
acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;
- if ((req_start >= field_start && req_start < field_end)
- || (req_end > field_start && req_end <= field_end)) {
+ if (req_end > field_start && field_end > req_start) {
err = conf_space_read(dev, cfg_entry, field_start,
&tmp_val);
if (err)
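
The rewritten condition in these two hunks is the standard interval-overlap test; the old endpoint-containment form missed accesses that fully enclose a field. A small standalone sketch:

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Half-open ranges [a_start, a_end) and [b_start, b_end) intersect
	 * iff each range starts before the other one ends.
	 */
	static bool ranges_overlap(uint64_t a_start, uint64_t a_end,
				   uint64_t b_start, uint64_t b_end)
	{
		return a_end > b_start && b_end > a_start;
	}

	int main(void)
	{
		/*
		 * A 4-byte write at offset 0 fully encloses a 2-byte field at
		 * offset 1; the old test only asked whether either endpoint of
		 * the request fell inside the field, and missed this case.
		 */
		return ranges_overlap(0, 4, 1, 3) ? 0 : 1;
	}
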
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;
- if ((req_start >= field_start && req_start < field_end)
- || (req_end > field_start && req_end <= field_end)) {
+ if (req_end > field_start && field_end > req_start) {
tmp_val = 0;
err = xen_pcibk_config_read(dev, field_start,
/* A write to obtain the length must happen as a 32-bit write.
* This does not (yet) support writing individual bytes
*/
- if (value == ~PCI_ROM_ADDRESS_ENABLE)
+ if ((value | ~PCI_ROM_ADDRESS_MASK) == ~0U)
bar->which = 1;
else {
u32 tmpval;
(PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_64))) {
bar_info->val = res[pos - 1].start >> 32;
- bar_info->len_val = res[pos - 1].end >> 32;
+ bar_info->len_val = -resource_size(&res[pos - 1]) >> 32;
return;
}
}
+ if (!res[pos].flags ||
+ (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET |
+ IORESOURCE_BUSY)))
+ return;
+
bar_info->val = res[pos].start |
(res[pos].flags & PCI_REGION_FLAG_MASK);
- bar_info->len_val = resource_size(&res[pos]);
+ bar_info->len_val = -resource_size(&res[pos]) |
+ (res[pos].flags & PCI_REGION_FLAG_MASK);
}
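
read_dev_bar() now stores the size-probe encoding, -resource_size(), which is what hardware returns in the address bits after the all-ones sizing write: for a power-of-two size S the encoding is the two's complement -S, i.e. ~(S - 1). A small userspace sketch of the encoding and its inverse:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t size = 0x1000;		/* a 4 KiB memory BAR */
		uint32_t len_val = -size;	/* 0xfffff000, as sizing reads see */

		/* decode: mask off the low flag bits, invert, add one */
		uint32_t decoded = ~(len_val & 0xfffffff0u) + 1;

		printf("len_val=0x%08x decoded=0x%x\n", len_val, decoded);
		return decoded == size ? 0 : 1;
	}
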
static void *bar_init(struct pci_dev *dev, int offset)
{
- struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+ struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
if (!bar)
return ERR_PTR(-ENOMEM);
read_dev_bar(dev, bar, offset, ~0);
- bar->which = 0;
return bar;
}
static void *rom_init(struct pci_dev *dev, int offset)
{
- struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+ struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
if (!bar)
return ERR_PTR(-ENOMEM);
read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
- bar->which = 0;
return bar;
}
rc = -ENOMEM;
goto out;
}
+ } else {
+ list_for_each_entry(trans, &u->transactions, list)
+ if (trans->handle.id == u->u.msg.tx_id)
+ break;
+ if (&trans->list == &u->transactions)
+ return -ESRCH;
}
reply = xenbus_dev_request_and_reply(&u->u.msg);
if (IS_ERR(reply)) {
- kfree(trans);
+ if (msg_type == XS_TRANSACTION_START)
+ kfree(trans);
rc = PTR_ERR(reply);
goto out;
}
list_add(&trans->list, &u->transactions);
}
} else if (u->u.msg.type == XS_TRANSACTION_END) {
- list_for_each_entry(trans, &u->transactions, list)
- if (trans->handle.id == u->u.msg.tx_id)
- break;
- BUG_ON(&trans->list == &u->transactions);
list_del(&trans->list);
-
kfree(trans);
}
void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
{
void *ret;
- struct xsd_sockmsg req_msg = *msg;
+ enum xsd_sockmsg_type type = msg->type;
int err;
- if (req_msg.type == XS_TRANSACTION_START)
+ if (type == XS_TRANSACTION_START)
transaction_start();
mutex_lock(&xs_state.request_mutex);
mutex_unlock(&xs_state.request_mutex);
- if (IS_ERR(ret))
- return ret;
-
if ((msg->type == XS_TRANSACTION_END) ||
- ((req_msg.type == XS_TRANSACTION_START) &&
- (msg->type == XS_ERROR)))
+ ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR)))
transaction_end();
return ret;
v9fs_proto_dotu(v9ses));
fid = file->private_data;
if (!fid) {
- fid = v9fs_fid_clone(file->f_path.dentry);
+ fid = v9fs_fid_clone(file_dentry(file));
if (IS_ERR(fid))
return PTR_ERR(fid);
* because we want write after unlink usecase
* to work.
*/
- fid = v9fs_writeback_fid(file->f_path.dentry);
+ fid = v9fs_writeback_fid(file_dentry(file));
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
mutex_unlock(&v9inode->v_mutex);
* because we want write after unlink usecase
* to work.
*/
- fid = v9fs_writeback_fid(filp->f_path.dentry);
+ fid = v9fs_writeback_fid(file_dentry(filp));
if (IS_ERR(fid)) {
retval = PTR_ERR(fid);
mutex_unlock(&v9inode->v_mutex);
struct p9_fid *fid, *inode_fid;
struct dentry *res = NULL;
- if (d_unhashed(dentry)) {
+ if (d_in_lookup(dentry)) {
res = v9fs_vfs_lookup(dir, dentry, 0);
if (IS_ERR(res))
return PTR_ERR(res);
struct posix_acl *pacl = NULL, *dacl = NULL;
struct dentry *res = NULL;
- if (d_unhashed(dentry)) {
+ if (d_in_lookup(dentry)) {
res = v9fs_vfs_lookup(dir, dentry, 0);
if (IS_ERR(res))
return PTR_ERR(res);
};
#define AUTOFS_INF_EXPIRING (1<<0) /* dentry in the process of expiring */
-#define AUTOFS_INF_NO_RCU (1<<1) /* the dentry is being considered
+#define AUTOFS_INF_WANT_EXPIRE (1<<1) /* the dentry is being considered
* for expiry, so RCU_walk is
- * not permitted
+ * not permitted. If it progresses to
+ * an actual expiry attempt, the flag is
+ * not cleared when EXPIRING is set -
+ * in that case it gets cleared only
+ * when it comes to clearing EXPIRING.
*/
#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
if (ino->flags & AUTOFS_INF_PENDING)
goto out;
if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
- ino->flags |= AUTOFS_INF_NO_RCU;
+ ino->flags |= AUTOFS_INF_WANT_EXPIRE;
spin_unlock(&sbi->fs_lock);
synchronize_rcu();
spin_lock(&sbi->fs_lock);
if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
ino->flags |= AUTOFS_INF_EXPIRING;
- smp_mb();
- ino->flags &= ~AUTOFS_INF_NO_RCU;
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
return root;
}
- ino->flags &= ~AUTOFS_INF_NO_RCU;
+ ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
}
out:
spin_unlock(&sbi->fs_lock);
while ((dentry = get_next_positive_subdir(dentry, root))) {
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(dentry);
- if (ino->flags & AUTOFS_INF_NO_RCU)
+ if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
expired = NULL;
else
expired = should_expire(dentry, mnt, timeout, how);
continue;
}
ino = autofs4_dentry_ino(expired);
- ino->flags |= AUTOFS_INF_NO_RCU;
+ ino->flags |= AUTOFS_INF_WANT_EXPIRE;
spin_unlock(&sbi->fs_lock);
synchronize_rcu();
spin_lock(&sbi->fs_lock);
goto found;
}
- ino->flags &= ~AUTOFS_INF_NO_RCU;
+ ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
if (expired != dentry)
dput(expired);
spin_unlock(&sbi->fs_lock);
found:
pr_debug("returning %p %pd\n", expired, expired);
ino->flags |= AUTOFS_INF_EXPIRING;
- smp_mb();
- ino->flags &= ~AUTOFS_INF_NO_RCU;
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
- spin_lock(&sbi->lookup_lock);
- spin_lock(&expired->d_parent->d_lock);
- spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
- list_move(&expired->d_parent->d_subdirs, &expired->d_child);
- spin_unlock(&expired->d_lock);
- spin_unlock(&expired->d_parent->d_lock);
- spin_unlock(&sbi->lookup_lock);
return expired;
}
int status;
/* Block on any pending expire */
- if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)))
+ if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
return 0;
if (rcu_walk)
return -ECHILD;
ino = autofs4_dentry_ino(dentry);
/* avoid rapid-fire expire attempts if expiry fails */
ino->last_used = now;
- ino->flags &= ~AUTOFS_INF_EXPIRING;
+ ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
spin_lock(&sbi->fs_lock);
/* avoid rapid-fire expire attempts if expiry fails */
ino->last_used = now;
- ino->flags &= ~AUTOFS_INF_EXPIRING;
+ ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
dput(dentry);
*/
struct inode *inode;
- if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))
+ if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
return 0;
if (d_mountpoint(dentry))
return 0;
set_fs(KERNEL_DS);
mutex_lock(&sbi->pipe_mutex);
- wr = __vfs_write(file, data, bytes, &file->f_pos);
- while (bytes && wr) {
+ while (bytes) {
+ wr = __vfs_write(file, data, bytes, &file->f_pos);
+ if (wr <= 0)
+ break;
data += wr;
bytes -= wr;
- wr = __vfs_write(file, data, bytes, &file->f_pos);
}
mutex_unlock(&sbi->pipe_mutex);
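
The rewritten loop above checks every __vfs_write() result before advancing, so a pipe that starts failing mid-stream terminates the loop instead of re-sending stale byte counts. A userspace sketch of the same all-or-error write loop:

	#include <stddef.h>
	#include <unistd.h>

	static int write_all(int fd, const char *data, size_t bytes)
	{
		while (bytes) {
			ssize_t wr = write(fd, data, bytes);
			if (wr <= 0)
				return -1;	/* error or dead pipe: stop */
			data += wr;
			bytes -= (size_t)wr;
		}
		return 0;
	}
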
goto end_coredump;
/* Align to page */
- if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
+ if (!dump_skip(cprm, dataoff - cprm->pos))
goto end_coredump;
for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
goto end_coredump;
}
- if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
+ if (!dump_skip(cprm, dataoff - cprm->pos))
goto end_coredump;
if (!elf_fdpic_dump_segments(cprm))
* This algorithm is recursive because the amount of used stack space
* is very small and the max recursion depth is limited.
*/
- indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)",
+ indent_add = sprintf(buf, "%c-%llu(%s/%llu/%u)",
btrfsic_get_block_type(state, block),
block->logical_bytenr, block->dev_state->name,
block->dev_bytenr, block->mirror_num);
if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
BUG_ON(tm->slot != 0);
- eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
+ eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
+ eb->len);
if (!eb_rewin) {
btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
} else if (old_root) {
btrfs_tree_read_unlock(eb_root);
free_extent_buffer(eb_root);
- eb = alloc_dummy_extent_buffer(root->fs_info, logical);
+ eb = alloc_dummy_extent_buffer(root->fs_info, logical,
+ root->nodesize);
} else {
btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
eb = btrfs_clone_extent_buffer(eb_root);
trans->transid, root->fs_info->generation);
if (!should_cow_block(trans, root, buf)) {
+ trans->dirty = true;
*cow_ret = buf;
return 0;
}
if (!err) {
tmp = (struct btrfs_disk_key *)(kaddr + offset -
map_start);
- } else {
+ } else if (err == 1) {
read_extent_buffer(eb, &unaligned,
offset, sizeof(unaligned));
tmp = &unaligned;
+ } else {
+ return err;
}
} else {
if (!btrfs_buffer_uptodate(tmp, 0, 0))
ret = -EIO;
free_extent_buffer(tmp);
+ } else {
+ ret = PTR_ERR(tmp);
}
return ret;
}
* then we don't want to set the path blocking,
* so we test it here
*/
- if (!should_cow_block(trans, root, b))
+ if (!should_cow_block(trans, root, b)) {
+ trans->dirty = true;
goto cow_done;
+ }
/*
* must have write locks on this node and the
}
ret = key_search(b, key, level, &prev_cmp, &slot);
+ if (ret < 0)
+ goto done;
if (level != 0) {
int dec = 0;
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root, unsigned long count);
int btrfs_async_run_delayed_refs(struct btrfs_root *root,
- unsigned long count, int wait);
+ unsigned long count, u64 transid, int wait);
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr,
return 0;
}
-void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
- struct list_head *del_list)
+bool btrfs_readdir_get_delayed_items(struct inode *inode,
+ struct list_head *ins_list,
+ struct list_head *del_list)
{
struct btrfs_delayed_node *delayed_node;
struct btrfs_delayed_item *item;
delayed_node = btrfs_get_delayed_node(inode);
if (!delayed_node)
- return;
+ return false;
+
+ /*
+ * We can only do one readdir with delayed items at a time because of
+ * item->readdir_list.
+ */
+ inode_unlock_shared(inode);
+ inode_lock(inode);
mutex_lock(&delayed_node->mutex);
item = __btrfs_first_delayed_insertion_item(delayed_node);
* requeue or dequeue this delayed node.
*/
atomic_dec(&delayed_node->refs);
+
+ return true;
}
-void btrfs_put_delayed_items(struct list_head *ins_list,
- struct list_head *del_list)
+void btrfs_readdir_put_delayed_items(struct inode *inode,
+ struct list_head *ins_list,
+ struct list_head *del_list)
{
struct btrfs_delayed_item *curr, *next;
if (atomic_dec_and_test(&curr->refs))
kfree(curr);
}
+
+ /*
+ * The VFS is going to do up_read(), so we need to downgrade back to a
+ * read lock.
+ */
+ downgrade_write(&inode->i_rwsem);
}
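
The get/put pair above has to juggle the VFS's i_rwsem: readdir enters with it held shared, there is no atomic read-to-write upgrade, so the get side unlocks and relocks exclusively, and the put side downgrades so the VFS's up_read() stays balanced. A minimal sketch of that lock dance on a bare rw_semaphore:

	#include <linux/rwsem.h>

	/* caller enters and leaves holding the semaphore shared */
	static void demo_exclusive_section(struct rw_semaphore *sem)
	{
		up_read(sem);		/* no atomic upgrade exists ... */
		down_write(sem);	/* ... so drop and retake exclusively */

		/* ... exclusive work, e.g. snapshotting delayed items ... */

		downgrade_write(sem);	/* return holding it shared again */
	}
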
int btrfs_should_delete_dir_index(struct list_head *del_list,
void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
/* Used for readdir() */
-void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
- struct list_head *del_list);
-void btrfs_put_delayed_items(struct list_head *ins_list,
- struct list_head *del_list);
+bool btrfs_readdir_get_delayed_items(struct inode *inode,
+ struct list_head *ins_list,
+ struct list_head *del_list);
+void btrfs_readdir_put_delayed_items(struct inode *inode,
+ struct list_head *ins_list,
+ struct list_head *del_list);
int btrfs_should_delete_dir_index(struct list_head *del_list,
u64 index);
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
struct inode *btree_inode = root->fs_info->btree_inode;
buf = btrfs_find_create_tree_block(root, bytenr);
- if (!buf)
+ if (IS_ERR(buf))
return;
read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
buf, 0, WAIT_NONE, btree_get_extent, 0);
int ret;
buf = btrfs_find_create_tree_block(root, bytenr);
- if (!buf)
+ if (IS_ERR(buf))
return 0;
set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
u64 bytenr)
{
if (btrfs_test_is_dummy_root(root))
- return alloc_test_extent_buffer(root->fs_info, bytenr);
+ return alloc_test_extent_buffer(root->fs_info, bytenr,
+ root->nodesize);
return alloc_extent_buffer(root->fs_info, bytenr);
}
int ret;
buf = btrfs_find_create_tree_block(root, bytenr);
- if (!buf)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(buf))
+ return buf;
ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
if (ret) {
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
-struct btrfs_root *btrfs_alloc_dummy_root(void)
+struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize)
{
struct btrfs_root *root;
root = btrfs_alloc_root(NULL, GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);
- __setup_root(4096, 4096, 4096, root, NULL, 1);
+ /* We don't use the stripesize in selftest, set it as sectorsize */
+ __setup_root(nodesize, sectorsize, sectorsize, root, NULL,
+ BTRFS_ROOT_TREE_OBJECTID);
set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
root->alloc_bytenr = 0;
if (btrfs_need_cleaner_sleep(root))
goto sleep;
+ /*
+ * Do not do anything if we might cause open_ctree() to block
+ * before we have finished mounting the filesystem.
+ */
+ if (!root->fs_info->open)
+ goto sleep;
+
if (!mutex_trylock(&root->fs_info->cleaner_mutex))
goto sleep;
int num_backups_tried = 0;
int backup_index = 0;
int max_active;
- bool cleaner_mutex_locked = false;
tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
nodesize = btrfs_super_nodesize(disk_super);
sectorsize = btrfs_super_sectorsize(disk_super);
- stripesize = btrfs_super_stripesize(disk_super);
+ stripesize = sectorsize;
fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
goto fail_sysfs;
}
- /*
- * Hold the cleaner_mutex thread here so that we don't block
- * for a long time on btrfs_recover_relocation. cleaner_kthread
- * will wait for us to finish mounting the filesystem.
- */
- mutex_lock(&fs_info->cleaner_mutex);
- cleaner_mutex_locked = true;
fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
"btrfs-cleaner");
if (IS_ERR(fs_info->cleaner_kthread))
ret = btrfs_cleanup_fs_roots(fs_info);
if (ret)
goto fail_qgroup;
- /* We locked cleaner_mutex before creating cleaner_kthread. */
+
+ mutex_lock(&fs_info->cleaner_mutex);
ret = btrfs_recover_relocation(tree_root);
+ mutex_unlock(&fs_info->cleaner_mutex);
if (ret < 0) {
btrfs_warn(fs_info, "failed to recover relocation: %d",
ret);
goto fail_qgroup;
}
}
- mutex_unlock(&fs_info->cleaner_mutex);
- cleaner_mutex_locked = false;
location.objectid = BTRFS_FS_TREE_OBJECTID;
location.type = BTRFS_ROOT_ITEM_KEY;
filemap_write_and_wait(fs_info->btree_inode->i_mapping);
fail_sysfs:
- if (cleaner_mutex_locked) {
- mutex_unlock(&fs_info->cleaner_mutex);
- cleaner_mutex_locked = false;
- }
btrfs_sysfs_remove_mounted(fs_info);
fail_fsdev_sysfs:
* Hint to catch really bogus numbers, bitflips or so, more exact checks are
* done later
*/
+ if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
+ btrfs_err(fs_info, "bytes_used is too small %llu",
+ btrfs_super_bytes_used(sb));
+ ret = -EINVAL;
+ }
+ if (!is_power_of_2(btrfs_super_stripesize(sb))) {
+ btrfs_err(fs_info, "invalid stripesize %u",
+ btrfs_super_stripesize(sb));
+ ret = -EINVAL;
+ }
if (btrfs_super_num_devices(sb) > (1UL << 31))
printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
btrfs_super_num_devices(sb));
void btrfs_free_fs_root(struct btrfs_root *root);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-struct btrfs_root *btrfs_alloc_dummy_root(void);
+struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize);
#endif
/*
struct btrfs_bio *bbio = NULL;
+ /*
+ * Avoid races with device replace and make sure our bbio has devices
+ * associated with its stripes that don't go away while we are discarding.
+ */
+ btrfs_bio_counter_inc_blocked(root->fs_info);
/* Tell the block device(s) that the sectors can be discarded */
ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
bytenr, &num_bytes, &bbio, 0);
}
btrfs_put_bbio(bbio);
}
+ btrfs_bio_counter_dec(root->fs_info);
if (actual_bytes)
*actual_bytes = discarded_bytes;
struct async_delayed_refs {
struct btrfs_root *root;
+ u64 transid;
int count;
int error;
int sync;
async = container_of(work, struct async_delayed_refs, work);
+ /* if the commit is already started, we don't need to wait here */
+ if (btrfs_transaction_blocked(async->root->fs_info))
+ goto done;
+
trans = btrfs_join_transaction(async->root);
if (IS_ERR(trans)) {
async->error = PTR_ERR(trans);
* wait on delayed refs
*/
trans->sync = true;
+
+ /* Don't bother flushing if we got into a different transaction */
+ if (trans->transid > async->transid)
+ goto end;
+
ret = btrfs_run_delayed_refs(trans, async->root, async->count);
if (ret)
async->error = ret;
-
+end:
ret = btrfs_end_transaction(trans, async->root);
if (ret && !async->error)
async->error = ret;
}
int btrfs_async_run_delayed_refs(struct btrfs_root *root,
- unsigned long count, int wait)
+ unsigned long count, u64 transid, int wait)
{
struct async_delayed_refs *async;
int ret;
async->root = root->fs_info->tree_root;
async->count = count;
async->error = 0;
+ async->transid = transid;
if (wait)
async->sync = 1;
else
struct extent_buffer *buf;
buf = btrfs_find_create_tree_block(root, bytenr);
- if (!buf)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(buf))
+ return buf;
+
btrfs_set_header_generation(buf, trans->transid);
btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
btrfs_tree_lock(buf);
set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
buf->start + buf->len - 1, GFP_NOFS);
}
- trans->blocks_used++;
+ trans->dirty = true;
/* this returns a buffer locked for blocking */
return buf;
}
next = btrfs_find_tree_block(root->fs_info, bytenr);
if (!next) {
next = btrfs_find_create_tree_block(root, bytenr);
- if (!next)
- return -ENOMEM;
+ if (IS_ERR(next))
+ return PTR_ERR(next);
+
btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
level - 1);
reada = 1;
bio->bi_iter.bi_size = 0;
map_length = length;
+ /*
+ * Avoid races with device replace and make sure our bbio has devices
+ * associated with its stripes that don't go away while we are doing the
+ * read repair operation.
+ */
+ btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_block(fs_info, WRITE, logical,
&map_length, &bbio, mirror_num);
if (ret) {
+ btrfs_bio_counter_dec(fs_info);
bio_put(bio);
return -EIO;
}
dev = bbio->stripes[mirror_num-1].dev;
btrfs_put_bbio(bbio);
if (!dev || !dev->bdev || !dev->writeable) {
+ btrfs_bio_counter_dec(fs_info);
bio_put(bio);
return -EIO;
}
if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
/* try to remap that extent elsewhere? */
+ btrfs_bio_counter_dec(fs_info);
bio_put(bio);
btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
return -EIO;
"read error corrected: ino %llu off %llu (dev %s sector %llu)",
btrfs_ino(inode), start,
rcu_str_deref(dev->name), sector);
+ btrfs_bio_counter_dec(fs_info);
bio_put(bio);
return 0;
}
}
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start)
+ u64 start, u32 nodesize)
{
unsigned long len;
if (!fs_info) {
/*
* Called only from tests that don't always have a fs_info
- * available, but we know that nodesize is 4096
+ * available
*/
- len = 4096;
+ len = nodesize;
} else {
len = fs_info->tree_root->nodesize;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start)
+ u64 start, u32 nodesize)
{
struct extent_buffer *eb, *exists = NULL;
int ret;
eb = find_extent_buffer(fs_info, start);
if (eb)
return eb;
- eb = alloc_dummy_extent_buffer(fs_info, start);
+ eb = alloc_dummy_extent_buffer(fs_info, start, nodesize);
if (!eb)
return NULL;
eb->fs_info = fs_info;
int uptodate = 1;
int ret;
+ if (!IS_ALIGNED(start, fs_info->tree_root->sectorsize)) {
+ btrfs_err(fs_info, "bad tree block start %llu", start);
+ return ERR_PTR(-EINVAL);
+ }
+
eb = find_extent_buffer(fs_info, start);
if (eb)
return eb;
eb = __alloc_extent_buffer(fs_info, start, len);
if (!eb)
- return NULL;
+ return ERR_PTR(-ENOMEM);
for (i = 0; i < num_pages; i++, index++) {
p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
- if (!p)
+ if (!p) {
+ exists = ERR_PTR(-ENOMEM);
goto free_eb;
+ }
spin_lock(&mapping->private_lock);
if (PagePrivate(p)) {
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
again:
ret = radix_tree_preload(GFP_NOFS);
- if (ret)
+ if (ret) {
+ exists = ERR_PTR(ret);
goto free_eb;
+ }
spin_lock(&fs_info->buffer_lock);
ret = radix_tree_insert(&fs_info->buffer_radix,
return ret;
}
+/*
+ * return 0 if the item is found within a page.
+ * return 1 if the item spans two pages.
+ * return -EINVAL otherwise.
+ */
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
unsigned long min_len, char **map,
unsigned long *map_start,
PAGE_SHIFT;
if (i != end_i)
- return -EINVAL;
+ return 1;
if (i == 0) {
offset = start_offset;
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start);
+ u64 start, u32 nodesize);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
u64 *end, u64 max_bytes);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start);
+ u64 start, u32 nodesize);
#endif
reserve_bytes = round_up(write_bytes + sector_offset,
root->sectorsize);
- if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
- BTRFS_INODE_PREALLOC)) &&
- check_can_nocow(inode, pos, &write_bytes) > 0) {
- /*
- * For nodata cow case, no need to reserve
- * data space.
- */
- only_release_metadata = true;
- /*
- * our prealloc extent may be smaller than
- * write_bytes, so scale down.
- */
- num_pages = DIV_ROUND_UP(write_bytes + offset,
- PAGE_SIZE);
- reserve_bytes = round_up(write_bytes + sector_offset,
- root->sectorsize);
- goto reserve_metadata;
- }
-
ret = btrfs_check_data_free_space(inode, pos, write_bytes);
- if (ret < 0)
- break;
+ if (ret < 0) {
+ if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+ BTRFS_INODE_PREALLOC)) &&
+ check_can_nocow(inode, pos, &write_bytes) > 0) {
+ /*
+ * For nodata cow case, no need to reserve
+ * data space.
+ */
+ only_release_metadata = true;
+ /*
+ * our prealloc extent may be smaller than
+ * write_bytes, so scale down.
+ */
+ num_pages = DIV_ROUND_UP(write_bytes + offset,
+ PAGE_SIZE);
+ reserve_bytes = round_up(write_bytes +
+ sector_offset,
+ root->sectorsize);
+ } else {
+ break;
+ }
+ }
-reserve_metadata:
ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
if (ret) {
if (!only_release_metadata)
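
The rewrite above inverts the old order: the data-space reservation is
attempted first, and the NOCOW/prealloc check only runs as a fallback when
that reservation fails, which also removes the reserve_metadata label.
Distilled control flow inside the write loop (nocow_possible is an
illustrative stand-in for the inode-flag test plus check_can_nocow()):

	ret = btrfs_check_data_free_space(inode, pos, write_bytes);
	if (ret < 0) {
		if (!nocow_possible(inode, pos, &write_bytes))
			break;			/* genuine ENOSPC */
		/* NOCOW path: metadata only; write_bytes may shrink */
		only_release_metadata = true;
	}
	ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
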
#include "inode-map.h"
#include "volumes.h"
-#define BITS_PER_BITMAP (PAGE_SIZE * 8)
+#define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG SZ_32K
struct btrfs_trim_range {
u64 offset)
{
u64 bitmap_start;
- u32 bytes_per_bitmap;
+ u64 bytes_per_bitmap;
bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
bitmap_start = offset - ctl->start;
- bitmap_start = div_u64(bitmap_start, bytes_per_bitmap);
+ bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
bitmap_start *= bytes_per_bitmap;
bitmap_start += ctl->start;
u64 bitmap_bytes;
u64 extent_bytes;
u64 size = block_group->key.offset;
- u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
- u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg);
+ u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
+ u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
- max_bitmaps = max_t(u32, max_bitmaps, 1);
+ max_bitmaps = max_t(u64, max_bitmaps, 1);
ASSERT(ctl->total_bitmaps <= max_bitmaps);
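
Why the u32 to u64 widening above matters, worked through for the largest
supported unit (illustrative arithmetic, not patch code):

	/*
	 * With a 64K page size:
	 *   BITS_PER_BITMAP  = 64K * 8        = 524288 bits
	 *   bytes_per_bitmap = 524288 * 65536 = 2^35 bytes (32 GiB)
	 * 2^35 overflows a u32 (max 2^32 - 1), so the old u32 math
	 * truncated; u64 arithmetic with div64_u64() stays exact.
	 */
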
* sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
* we add more bitmaps.
*/
- bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE;
+ bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;
if (bitmap_bytes >= max_bytes) {
ctl->extents_thresh = 0;
if (tmp->offset + tmp->bytes < offset)
break;
if (offset + bytes < tmp->offset) {
- n = rb_prev(&info->offset_index);
+ n = rb_prev(&tmp->offset_index);
continue;
}
info = tmp;
if (offset + bytes < tmp->offset)
break;
if (tmp->offset + tmp->bytes < offset) {
- n = rb_next(&info->offset_index);
+ n = rb_next(&tmp->offset_index);
continue;
}
info = tmp;
return PTR_ERR_OR_ZERO(tfm);
}
+const char *btrfs_crc32c_impl(void)
+{
+ return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
+}
+
void btrfs_hash_exit(void)
{
crypto_free_shash(tfm);
int __init btrfs_hash_init(void);
void btrfs_hash_exit(void);
+const char *btrfs_crc32c_impl(void);
u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length);
/* grab metadata reservation from transaction handle */
if (reserve) {
ret = btrfs_orphan_reserve_metadata(trans, inode);
- BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
+ ASSERT(!ret);
+ if (ret) {
+ atomic_dec(&root->orphan_inodes);
+ clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
+ &BTRFS_I(inode)->runtime_flags);
+ if (insert)
+ clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+ &BTRFS_I(inode)->runtime_flags);
+ return ret;
+ }
}
/* insert an orphan item to track this unlinked/truncated file */
BUG_ON(ret);
if (btrfs_should_throttle_delayed_refs(trans, root))
btrfs_async_run_delayed_refs(root,
+ trans->transid,
trans->delayed_ref_updates * 2, 0);
if (be_nice) {
if (truncate_space_check(trans, root,
int name_len;
int is_curr = 0; /* ctx->pos points to the current index? */
bool emitted;
+ bool put = false;
/* FIXME, use a real flag for deciding about the key type */
if (root->fs_info->tree_root == root)
if (key_type == BTRFS_DIR_INDEX_KEY) {
INIT_LIST_HEAD(&ins_list);
INIT_LIST_HEAD(&del_list);
- btrfs_get_delayed_items(inode, &ins_list, &del_list);
+ put = btrfs_readdir_get_delayed_items(inode, &ins_list,
+ &del_list);
}
key.type = key_type;
nopos:
ret = 0;
err:
- if (key_type == BTRFS_DIR_INDEX_KEY)
- btrfs_put_delayed_items(&ins_list, &del_list);
+ if (put)
+ btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
btrfs_free_path(path);
return ret;
}
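
The bool put guard above ensures the put only runs when the matching get
actually ran (the get is skipped for non-index key types). The resulting
pairing, reduced to its skeleton:

	bool put = false;

	if (key_type == BTRFS_DIR_INDEX_KEY)
		put = btrfs_readdir_get_delayed_items(inode, &ins_list,
						      &del_list);
	/* ... emit directory entries ... */
	if (put)
		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
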
* existing will always be non-NULL, since there must be
* extent causing the -EEXIST.
*/
- if (start >= extent_map_end(existing) ||
+ if (existing->start == em->start &&
+ extent_map_end(existing) == extent_map_end(em) &&
+ em->block_start == existing->block_start) {
+ /*
+ * These two extents are the same; this happens
+ * especially with inline extents.
+ */
+ free_extent_map(em);
+ em = existing;
+ err = 0;
+
+ } else if (start >= extent_map_end(existing) ||
start <= existing->start) {
/*
* The existing extent map is the one nearest to
static const struct file_operations btrfs_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = btrfs_real_readdir,
+ .iterate_shared = btrfs_real_readdir,
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = btrfs_compat_ioctl,
return count;
}
-void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
+int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
const u64 range_start, const u64 range_len)
{
struct btrfs_root *root;
struct list_head splice;
int done;
+ int total_done = 0;
INIT_LIST_HEAD(&splice);
done = btrfs_wait_ordered_extents(root, nr,
range_start, range_len);
btrfs_put_fs_root(root);
+ total_done += done;
spin_lock(&fs_info->ordered_root_lock);
if (nr != -1) {
list_splice_tail(&splice, &fs_info->ordered_roots);
spin_unlock(&fs_info->ordered_root_lock);
mutex_unlock(&fs_info->ordered_operations_mutex);
+
+ return total_done;
}
/*
struct rb_node *prev = NULL;
struct btrfs_ordered_extent *test;
int ret = 1;
+ u64 orig_offset = offset;
spin_lock_irq(&tree->lock);
if (ordered) {
/* truncate file */
if (disk_i_size > i_size) {
- BTRFS_I(inode)->disk_i_size = i_size;
+ BTRFS_I(inode)->disk_i_size = orig_offset;
ret = 0;
goto out;
}
u32 *sum, int len);
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
const u64 range_start, const u64 range_len);
-void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
+int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
const u64 range_start, const u64 range_len);
void btrfs_get_logged_extents(struct inode *inode,
struct list_head *logged_list,
do {
enqueued = 0;
+ mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry(device, &fs_devices->devices, dev_list) {
if (atomic_read(&device->reada_in_flight) <
MAX_IN_FLIGHT)
enqueued += reada_start_machine_dev(fs_info,
device);
}
+ mutex_unlock(&fs_devices->device_list_mutex);
total += enqueued;
} while (enqueued && total < 10000);
*/
scrub_pause_on(fs_info);
ret = btrfs_inc_block_group_ro(root, cache);
+ if (!ret && is_dev_replace) {
+ /*
+ * If we are doing a device replace wait for any tasks
+ * that started delalloc right before we set the block
+ * group to RO mode, as they might have just allocated
+ * an extent from it or decided they could do a nocow
+ * write. And if any such tasks did that, wait for their
+ * ordered extents to complete and then commit the
+ * current transaction, so that we can later see the new
+ * extent items in the extent tree - the ordered extents
+ * create delayed data references (for cow writes) when
+ * they complete, which will be run and insert the
+ * corresponding extent items into the extent tree when
+ * we commit the transaction they used when running
+ * inode.c:btrfs_finish_ordered_io(). We later use
+ * the commit root of the extent tree to find extents
+ * to copy from the srcdev into the tgtdev, and we don't
+ * want to miss any new extents.
+ */
+ btrfs_wait_block_group_reservations(cache);
+ btrfs_wait_nocow_writers(cache);
+ ret = btrfs_wait_ordered_roots(fs_info, -1,
+ cache->key.objectid,
+ cache->key.offset);
+ if (ret > 0) {
+ struct btrfs_trans_handle *trans;
+
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans))
+ ret = PTR_ERR(trans);
+ else
+ ret = btrfs_commit_transaction(trans,
+ root);
+ if (ret) {
+ scrub_pause_off(fs_info);
+ btrfs_put_block_group(cache);
+ break;
+ }
+ }
+ }
scrub_pause_off(fs_info);
if (ret == 0) {
break;
}
+ btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
dev_replace->cursor_right = found_key.offset + length;
dev_replace->cursor_left = found_key.offset;
dev_replace->item_needs_writeback = 1;
+ btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
found_key.offset, cache, is_dev_replace);
scrub_pause_off(fs_info);
+ btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
+ dev_replace->cursor_left = dev_replace->cursor_right;
+ dev_replace->item_needs_writeback = 1;
+ btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
+
if (ro_set)
btrfs_dec_block_group_ro(root, cache);
ret = -ENOMEM;
break;
}
-
- dev_replace->cursor_left = dev_replace->cursor_right;
- dev_replace->item_needs_writeback = 1;
skip:
key.offset = found_key.offset + length;
btrfs_release_path(path);
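
Taken together, the scrub hunks above set the cursor window and advance it
under btrfs_dev_replace_lock() instead of updating the fields unlocked at
the end of the loop. Per-chunk sequence after the change (simplified; dr
abbreviates &fs_info->dev_replace):

	btrfs_dev_replace_lock(dr, 1);
	dr->cursor_left = found_key.offset;		/* window start */
	dr->cursor_right = found_key.offset + length;	/* window end */
	dr->item_needs_writeback = 1;
	btrfs_dev_replace_unlock(dr, 1);

	/* ... scrub_chunk() copies the window ... */

	btrfs_dev_replace_lock(dr, 1);
	dr->cursor_left = dr->cursor_right;		/* advance atomically */
	dr->item_needs_writeback = 1;
	btrfs_dev_replace_unlock(dr, 1);
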
trans->aborted = errno;
/* Nothing used. The other threads that have joined this
* transaction may be able to continue. */
- if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
+ if (!trans->dirty && list_empty(&trans->new_bgs)) {
const char *errstr;
errstr = btrfs_decode_error(errno);
}
}
sb->s_flags &= ~MS_RDONLY;
+
+ fs_info->open = 1;
}
out:
wake_up_process(fs_info->transaction_kthread);
static void btrfs_print_mod_info(void)
{
- printk(KERN_INFO "Btrfs loaded"
+ printk(KERN_INFO "Btrfs loaded, crc32c=%s"
#ifdef CONFIG_BTRFS_DEBUG
", debug=on"
#endif
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
", integrity-checker=on"
#endif
- "\n");
+ "\n",
+ btrfs_crc32c_impl());
}
static int btrfs_run_sanity_tests(void)
{
- int ret;
-
+ int ret, i;
+ u32 sectorsize, nodesize;
+ u32 test_sectorsize[] = {
+ PAGE_SIZE,
+ };
ret = btrfs_init_test_fs();
if (ret)
return ret;
-
- ret = btrfs_test_free_space_cache();
- if (ret)
- goto out;
- ret = btrfs_test_extent_buffer_operations();
- if (ret)
- goto out;
- ret = btrfs_test_extent_io();
- if (ret)
- goto out;
- ret = btrfs_test_inodes();
- if (ret)
- goto out;
- ret = btrfs_test_qgroups();
- if (ret)
- goto out;
- ret = btrfs_test_free_space_tree();
+ for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
+ sectorsize = test_sectorsize[i];
+ for (nodesize = sectorsize;
+ nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
+ nodesize <<= 1) {
+ pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n",
+ sectorsize, nodesize);
+ ret = btrfs_test_free_space_cache(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_extent_buffer_operations(sectorsize,
+ nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_extent_io(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_inodes(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_qgroups(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_free_space_tree(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ }
+ }
out:
btrfs_destroy_test_fs();
return ret;
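
Assuming a 4K PAGE_SIZE and the in-tree BTRFS_MAX_METADATA_BLOCKSIZE of 64K,
the nested loops above run every suite five times:

	/*
	 * sectorsize = 4096, nodesize doubling from sectorsize to 64K:
	 *   (4096, 4096) (4096, 8192) (4096, 16384)
	 *   (4096, 32768) (4096, 65536)
	 */
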
if (IS_ERR(test_mnt)) {
printk(KERN_ERR "btrfs: cannot mount test file system\n");
unregister_filesystem(&test_type);
- return ret;
+ return PTR_ERR(test_mnt);
}
return 0;
}
}
struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length)
+btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize)
{
struct btrfs_block_group_cache *cache;
cache->key.objectid = 0;
cache->key.offset = length;
cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
- cache->sectorsize = 4096;
- cache->full_stripe_len = 4096;
+ cache->sectorsize = sectorsize;
+ cache->full_stripe_len = sectorsize;
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
struct btrfs_root;
struct btrfs_trans_handle;
-int btrfs_test_free_space_cache(void);
-int btrfs_test_extent_buffer_operations(void);
-int btrfs_test_extent_io(void);
-int btrfs_test_inodes(void);
-int btrfs_test_qgroups(void);
-int btrfs_test_free_space_tree(void);
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
+int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
+int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
+int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
int btrfs_init_test_fs(void);
void btrfs_destroy_test_fs(void);
struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
void btrfs_free_dummy_root(struct btrfs_root *root);
struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length);
+btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize);
void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
#else
-static inline int btrfs_test_free_space_cache(void)
+static inline int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
{
return 0;
}
-static inline int btrfs_test_extent_buffer_operations(void)
+static inline int btrfs_test_extent_buffer_operations(u32 sectorsize,
+ u32 nodesize)
{
return 0;
}
static inline void btrfs_destroy_test_fs(void)
{
}
-static inline int btrfs_test_extent_io(void)
+static inline int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
return 0;
}
-static inline int btrfs_test_inodes(void)
+static inline int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
{
return 0;
}
-static inline int btrfs_test_qgroups(void)
+static inline int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
{
return 0;
}
-static inline int btrfs_test_free_space_tree(void)
+static inline int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
{
return 0;
}
#include "../extent_io.h"
#include "../disk-io.h"
-static int test_btrfs_split_item(void)
+static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
{
struct btrfs_path *path;
struct btrfs_root *root;
test_msg("Running btrfs_split_item tests\n");
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Could not allocate root\n");
return PTR_ERR(root);
return -ENOMEM;
}
- path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096);
+ path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize,
+ nodesize);
if (!eb) {
test_msg("Could not allocate dummy buffer\n");
ret = -ENOMEM;
return ret;
}
-int btrfs_test_extent_buffer_operations(void)
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize)
{
- test_msg("Running extent buffer operation tests");
- return test_btrfs_split_item();
+ test_msg("Running extent buffer operation tests\n");
+ return test_btrfs_split_item(sectorsize, nodesize);
}
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
+#include "../ctree.h"
#include "../extent_io.h"
#define PROCESS_UNLOCK (1 << 0)
return count;
}
-static int test_find_delalloc(void)
+static int test_find_delalloc(u32 sectorsize)
{
struct inode *inode;
struct extent_io_tree tmp;
* |--- delalloc ---|
* |--- search ---|
*/
- set_extent_delalloc(&tmp, 0, 4095, NULL);
+ set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
start = 0;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
test_msg("Should have found at least one delalloc\n");
goto out_bits;
}
- if (start != 0 || end != 4095) {
- test_msg("Expected start 0 end 4095, got start %Lu end %Lu\n",
- start, end);
+ if (start != 0 || end != (sectorsize - 1)) {
+ test_msg("Expected start 0 end %u, got start %llu end %llu\n",
+ sectorsize - 1, start, end);
goto out_bits;
}
unlock_extent(&tmp, start, end);
test_msg("Couldn't find the locked page\n");
goto out_bits;
}
- set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL);
+ set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
start = test_start;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
* |--- delalloc ---|
* |--- search ---|
*/
- test_start = max_bytes + 4096;
+ test_start = max_bytes + sectorsize;
locked_page = find_lock_page(inode->i_mapping, test_start >>
PAGE_SHIFT);
if (!locked_page) {
return ret;
}
+/**
+ * test_bit_in_byte - Determine whether a bit is set in a byte
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit_in_byte(int nr, const u8 *addr)
+{
+ return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
+}
+
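
Why a byte-wise helper instead of test_bit(): the generic test_bit() indexes
bits inside native-endian unsigned longs, while the extent-buffer bitmap is
defined byte by byte. Illustration, assuming a 64-bit big-endian machine:

	/*
	 * memory: addr[0] = 0x01, addr[1..7] = 0x00
	 *
	 * test_bit(0, (unsigned long *)addr) reads bit 0 of the native
	 * long, i.e. the LSB, which big-endian stores in addr[7] -> 0.
	 * test_bit_in_byte(0, addr) reads addr[0] & 1            -> 1.
	 */
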
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
unsigned long len)
{
return -EINVAL;
}
- bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
- sizeof(long) * BITS_PER_BYTE);
- extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
- sizeof(long) * BITS_PER_BYTE);
- if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
- test_msg("Setting straddling pages failed\n");
- return -EINVAL;
- }
+ /* Straddling pages test */
+ if (len > PAGE_SIZE) {
+ bitmap_set(bitmap,
+ (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+ sizeof(long) * BITS_PER_BYTE);
+ extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+ sizeof(long) * BITS_PER_BYTE);
+ if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+ test_msg("Setting straddling pages failed\n");
+ return -EINVAL;
+ }
- bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
- bitmap_clear(bitmap,
- (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
- sizeof(long) * BITS_PER_BYTE);
- extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
- extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
- sizeof(long) * BITS_PER_BYTE);
- if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
- test_msg("Clearing straddling pages failed\n");
- return -EINVAL;
+ bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
+ bitmap_clear(bitmap,
+ (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+ sizeof(long) * BITS_PER_BYTE);
+ extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
+ extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+ sizeof(long) * BITS_PER_BYTE);
+ if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+ test_msg("Clearing straddling pages failed\n");
+ return -EINVAL;
+ }
}
/*
for (i = 0; i < len * BITS_PER_BYTE; i++) {
int bit, bit1;
- bit = !!test_bit(i, bitmap);
+ bit = !!test_bit_in_byte(i, (u8 *)bitmap);
bit1 = !!extent_buffer_test_bit(eb, 0, i);
if (bit1 != bit) {
test_msg("Testing bit pattern failed\n");
return 0;
}
-static int test_eb_bitmaps(void)
+static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
- unsigned long len = PAGE_SIZE * 4;
+ unsigned long len;
unsigned long *bitmap;
struct extent_buffer *eb;
int ret;
test_msg("Running extent buffer bitmap tests\n");
+ /*
+ * On ppc64, sectorsize can be 64K, so 4 * 64K will be larger than
+ * BTRFS_MAX_METADATA_BLOCKSIZE.
+ */
+ len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
+ ? sectorsize * 4 : sectorsize;
+
bitmap = kmalloc(len, GFP_KERNEL);
if (!bitmap) {
test_msg("Couldn't allocate test bitmap\n");
/* Do it over again with an extent buffer which isn't page-aligned. */
free_extent_buffer(eb);
- eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len);
+ eb = __alloc_dummy_extent_buffer(NULL, nodesize / 2, len);
if (!eb) {
test_msg("Couldn't allocate test extent buffer\n");
kfree(bitmap);
return ret;
}
-int btrfs_test_extent_io(void)
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
int ret;
test_msg("Running extent I/O tests\n");
- ret = test_find_delalloc();
+ ret = test_find_delalloc(sectorsize);
if (ret)
goto out;
- ret = test_eb_bitmaps();
+ ret = test_eb_bitmaps(sectorsize, nodesize);
out:
test_msg("Extent I/O tests finished\n");
return ret;
#include "../disk-io.h"
#include "../free-space-cache.h"
-#define BITS_PER_BITMAP (PAGE_SIZE * 8)
+#define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
/*
* This test just does basic sanity checking, making sure we can add an extent
return 0;
}
-static int test_bitmaps(struct btrfs_block_group_cache *cache)
+static int test_bitmaps(struct btrfs_block_group_cache *cache,
+ u32 sectorsize)
{
u64 next_bitmap_offset;
int ret;
* The first bitmap we have starts at offset 0 so the next one is just
* at the end of the first bitmap.
*/
- next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+ next_bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
/* Test a bit straddling two bitmaps */
ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M,
}
/* This is the high grade jackassery */
-static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
+static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache,
+ u32 sectorsize)
{
- u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+ u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
int ret;
test_msg("Running bitmap and extent tests\n");
* requests.
*/
static int
-test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
+test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
+ u32 sectorsize)
{
int ret;
u64 offset;
* The goal is to test that the bitmap entry space stealing doesn't
* steal this space region.
*/
- ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, 4096);
+ ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize);
if (ret) {
test_msg("Error adding free space: %d\n", ret);
return ret;
return -ENOENT;
}
- if (cache->free_space_ctl->free_space != (SZ_1M + 4096)) {
- test_msg("Cache free space is not 1Mb + 4Kb\n");
+ if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) {
+ test_msg("Cache free space is not 1Mb + %u\n", sectorsize);
return -EINVAL;
}
return -EINVAL;
}
- /* All that remains is a 4Kb free space region in a bitmap. Confirm. */
+ /*
+ * All that remains is a sectorsize free space region in a bitmap.
+ * Confirm.
+ */
ret = check_num_extents_and_bitmaps(cache, 1, 1);
if (ret)
return ret;
- if (cache->free_space_ctl->free_space != 4096) {
- test_msg("Cache free space is not 4Kb\n");
+ if (cache->free_space_ctl->free_space != sectorsize) {
+ test_msg("Cache free space is not %u\n", sectorsize);
return -EINVAL;
}
offset = btrfs_find_space_for_alloc(cache,
- 0, 4096, 0,
+ 0, sectorsize, 0,
&max_extent_size);
if (offset != (SZ_128M + SZ_16M)) {
- test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n",
- offset);
+ test_msg("Failed to allocate %u, returned offset : %llu\n",
+ sectorsize, offset);
return -EINVAL;
}
* The goal is to test that the bitmap entry space stealing doesn't
* steal this space region.
*/
- ret = btrfs_add_free_space(cache, SZ_32M, 8192);
+ ret = btrfs_add_free_space(cache, SZ_32M, 2 * sectorsize);
if (ret) {
test_msg("Error adding free space: %d\n", ret);
return ret;
/*
 * Confirm that our extent entry didn't steal all free space from the
- * bitmap, because of the small 8Kb free space region.
+ * bitmap, because of the small 2 * sectorsize free space region.
*/
ret = check_num_extents_and_bitmaps(cache, 2, 1);
if (ret)
return -ENOENT;
}
- if (cache->free_space_ctl->free_space != (SZ_1M + 8192)) {
- test_msg("Cache free space is not 1Mb + 8Kb\n");
+ if (cache->free_space_ctl->free_space != (SZ_1M + 2 * sectorsize)) {
+ test_msg("Cache free space is not 1Mb + %u\n", 2 * sectorsize);
return -EINVAL;
}
return -EINVAL;
}
- /* All that remains is a 8Kb free space region in a bitmap. Confirm. */
+ /*
+ * All that remains is a 2 * sectorsize free space region
+ * in a bitmap. Confirm.
+ */
ret = check_num_extents_and_bitmaps(cache, 1, 1);
if (ret)
return ret;
- if (cache->free_space_ctl->free_space != 8192) {
- test_msg("Cache free space is not 8Kb\n");
+ if (cache->free_space_ctl->free_space != 2 * sectorsize) {
+ test_msg("Cache free space is not %u\n", 2 * sectorsize);
return -EINVAL;
}
offset = btrfs_find_space_for_alloc(cache,
- 0, 8192, 0,
+ 0, 2 * sectorsize, 0,
&max_extent_size);
if (offset != SZ_32M) {
- test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n",
+ test_msg("Failed to allocate %u, offset: %llu\n",
+ 2 * sectorsize,
offset);
return -EINVAL;
}
return 0;
}
-int btrfs_test_free_space_cache(void)
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
{
struct btrfs_block_group_cache *cache;
struct btrfs_root *root = NULL;
test_msg("Running btrfs free space cache tests\n");
- cache = btrfs_alloc_dummy_block_group(1024 * 1024 * 1024);
+ /*
+ * For ppc64 (with a 64K page size), bytes per bitmap might be
+ * larger than 1G. To keep the bitmap tests usable on ppc64,
+ * allocate a dummy block group whose size crosses bitmaps.
+ */
+ cache = btrfs_alloc_dummy_block_group(BITS_PER_BITMAP * sectorsize
+ + PAGE_SIZE, sectorsize);
if (!cache) {
test_msg("Couldn't run the tests\n");
return 0;
}
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto out;
ret = test_extents(cache);
if (ret)
goto out;
- ret = test_bitmaps(cache);
+ ret = test_bitmaps(cache, sectorsize);
if (ret)
goto out;
- ret = test_bitmaps_and_extents(cache);
+ ret = test_bitmaps_and_extents(cache, sectorsize);
if (ret)
goto out;
- ret = test_steal_space_from_bitmap_to_extent(cache);
+ ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize);
out:
btrfs_free_dummy_block_group(cache);
btrfs_free_dummy_root(root);
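
Worked sizing for the dummy block group allocated above (BITS_PER_BITMAP =
PAGE_SIZE * 8, one bit per sectorsize bytes):

	/*
	 * 4K pages:  32768 bits  * 4K  = 128 MiB covered per bitmap
	 * 64K pages: 524288 bits * 64K = 32 GiB covered per bitmap
	 * The group is one bitmap's coverage plus one page, so the
	 * entries that straddle two bitmaps still have somewhere to go.
	 */
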
* Boston, MA 021110-1307, USA.
*/
+#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../disk-io.h"
* The test cases align their operations to this in order to hit some of the
* edge cases in the bitmap code.
*/
-#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * 4096)
+#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE)
static int __check_free_space_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *,
struct btrfs_path *);
-static int run_test(test_func_t test_func, int bitmaps)
+static int run_test(test_func_t test_func, int bitmaps,
+ u32 sectorsize, u32 nodesize)
{
struct btrfs_root *root = NULL;
struct btrfs_block_group_cache *cache = NULL;
struct btrfs_path *path = NULL;
int ret;
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Couldn't allocate dummy root\n");
ret = PTR_ERR(root);
root->fs_info->free_space_root = root;
root->fs_info->tree_root = root;
- root->node = alloc_test_extent_buffer(root->fs_info, 4096);
+ root->node = alloc_test_extent_buffer(root->fs_info,
+ nodesize, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
ret = -ENOMEM;
}
btrfs_set_header_level(root->node, 0);
btrfs_set_header_nritems(root->node, 0);
- root->alloc_bytenr += 8192;
+ root->alloc_bytenr += 2 * nodesize;
- cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE);
+ cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE, sectorsize);
if (!cache) {
test_msg("Couldn't allocate dummy block group cache\n");
ret = -ENOMEM;
return ret;
}
-static int run_test_both_formats(test_func_t test_func)
+static int run_test_both_formats(test_func_t test_func,
+ u32 sectorsize, u32 nodesize)
{
int ret;
- ret = run_test(test_func, 0);
+ ret = run_test(test_func, 0, sectorsize, nodesize);
if (ret)
return ret;
- return run_test(test_func, 1);
+ return run_test(test_func, 1, sectorsize, nodesize);
}
-int btrfs_test_free_space_tree(void)
+int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
{
test_func_t tests[] = {
test_empty_block_group,
test_msg("Running free space tree tests\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
- int ret = run_test_both_formats(tests[i]);
+ int ret = run_test_both_formats(tests[i], sectorsize,
+ nodesize);
if (ret) {
- test_msg("%pf failed\n", tests[i]);
+ test_msg("%pf : sectorsize %u failed\n",
+ tests[i], sectorsize);
return ret;
}
}
* Boston, MA 021110-1307, USA.
*/
+#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../btrfs_inode.h"
 * diagram of how the extents will look, though this may not be possible; we
 * still want to make sure everything acts normally (the last number is not
 * inclusive)
*
- * [0 - 5][5 - 6][6 - 10][10 - 4096][ 4096 - 8192 ][8192 - 12288]
- * [hole ][inline][ hole ][ regular ][regular1 split][ hole ]
+ * [0 - 5][5 - 6][ 6 - 4096 ][ 4096 - 4100][4100 - 8195][8195 - 12291]
+ * [hole ][inline][hole but no extent][ hole ][ regular ][regular1 split]
*
- * [ 12288 - 20480][20480 - 24576][ 24576 - 28672 ][28672 - 36864][36864 - 45056]
- * [regular1 split][ prealloc1 ][prealloc1 written][ prealloc1 ][ compressed ]
+ * [12291 - 16387][16387 - 24579][24579 - 28675][ 28675 - 32771][32771 - 36867 ]
+ * [ hole ][regular1 split][ prealloc ][ prealloc1 ][prealloc1 written]
*
- * [45056 - 49152][49152-53248][53248-61440][61440-65536][ 65536+81920 ]
- * [ compressed1 ][ regular ][compressed1][ regular ][ hole but no extent]
+ * [36867 - 45059][45059 - 53251][53251 - 57347][57347 - 61443][61443- 69635]
+ * [ prealloc1 ][ compressed ][ compressed1 ][ regular ][ compressed1]
*
- * [81920-86016]
- * [ regular ]
+ * [69635-73731][ 73731 - 86019 ][86019-90115]
+ * [ regular ][ hole but no extent][ regular ]
*/
-static void setup_file_extents(struct btrfs_root *root)
+static void setup_file_extents(struct btrfs_root *root, u32 sectorsize)
{
int slot = 0;
u64 disk_bytenr = SZ_1M;
insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0,
slot);
slot++;
- offset = 4096;
+ offset = sectorsize;
/* Now another hole */
insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0,
offset += 4;
/* Now for a regular extent */
- insert_extent(root, offset, 4095, 4095, 0, disk_bytenr, 4096,
- BTRFS_FILE_EXTENT_REG, 0, slot);
+ insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0,
+ disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
- disk_bytenr += 4096;
- offset += 4095;
+ disk_bytenr += sectorsize;
+ offset += sectorsize - 1;
/*
* Now for 3 extents that were split from a hole punch so we test
* offsets properly.
*/
- insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
- BTRFS_FILE_EXTENT_REG, 0, slot);
+ insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+ 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
- offset += 4096;
- insert_extent(root, offset, 4096, 4096, 0, 0, 0, BTRFS_FILE_EXTENT_REG,
- 0, slot);
+ offset += sectorsize;
+ insert_extent(root, offset, sectorsize, sectorsize, 0, 0, 0,
+ BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
- offset += 4096;
- insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
+ offset += sectorsize;
+ insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+ 2 * sectorsize, disk_bytenr, 4 * sectorsize,
BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
- offset += 8192;
- disk_bytenr += 16384;
+ offset += 2 * sectorsize;
+ disk_bytenr += 4 * sectorsize;
/* Now for an unwritten prealloc extent */
- insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
- BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+ insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+ sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
slot++;
- offset += 4096;
+ offset += sectorsize;
/*
* We want to jack up disk_bytenr a little more so the em stuff doesn't
* merge our records.
*/
- disk_bytenr += 8192;
+ disk_bytenr += 2 * sectorsize;
/*
* Now for a partially written prealloc extent, basically the same as
* the hole punch example above. Ram_bytes never changes when you mark
* extents written btw.
*/
- insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
- BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+ insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+ 4 * sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
slot++;
- offset += 4096;
- insert_extent(root, offset, 4096, 16384, 4096, disk_bytenr, 16384,
- BTRFS_FILE_EXTENT_REG, 0, slot);
+ offset += sectorsize;
+ insert_extent(root, offset, sectorsize, 4 * sectorsize, sectorsize,
+ disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0,
+ slot);
slot++;
- offset += 4096;
- insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
+ offset += sectorsize;
+ insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+ 2 * sectorsize, disk_bytenr, 4 * sectorsize,
BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
slot++;
- offset += 8192;
- disk_bytenr += 16384;
+ offset += 2 * sectorsize;
+ disk_bytenr += 4 * sectorsize;
/* Now a normal compressed extent */
- insert_extent(root, offset, 8192, 8192, 0, disk_bytenr, 4096,
- BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+ insert_extent(root, offset, 2 * sectorsize, 2 * sectorsize, 0,
+ disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG,
+ BTRFS_COMPRESS_ZLIB, slot);
slot++;
- offset += 8192;
+ offset += 2 * sectorsize;
/* No merges */
- disk_bytenr += 8192;
+ disk_bytenr += 2 * sectorsize;
/* Now a split compressed extent */
- insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 4096,
- BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+ insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+ sectorsize, BTRFS_FILE_EXTENT_REG,
+ BTRFS_COMPRESS_ZLIB, slot);
slot++;
- offset += 4096;
- insert_extent(root, offset, 4096, 4096, 0, disk_bytenr + 4096, 4096,
+ offset += sectorsize;
+ insert_extent(root, offset, sectorsize, sectorsize, 0,
+ disk_bytenr + sectorsize, sectorsize,
BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
- offset += 4096;
- insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 4096,
+ offset += sectorsize;
+ insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+ 2 * sectorsize, disk_bytenr, sectorsize,
BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
slot++;
- offset += 8192;
- disk_bytenr += 8192;
+ offset += 2 * sectorsize;
+ disk_bytenr += 2 * sectorsize;
/* Now extents that have a hole but no hole extent */
- insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
- BTRFS_FILE_EXTENT_REG, 0, slot);
+ insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+ sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
- offset += 16384;
- disk_bytenr += 4096;
- insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
- BTRFS_FILE_EXTENT_REG, 0, slot);
+ offset += 4 * sectorsize;
+ disk_bytenr += sectorsize;
+ insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+ sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
}
static unsigned long prealloc_only = 0;
static unsigned long compressed_only = 0;
static unsigned long vacancy_only = 0;
-static noinline int test_btrfs_get_extent(void)
+static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
{
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;
goto out;
}
- root->node = alloc_dummy_extent_buffer(NULL, 4096);
+ root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
goto out;
/* First with no extents */
BTRFS_I(inode)->root = root;
- em = btrfs_get_extent(inode, NULL, 0, 0, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, 0, sectorsize, 0);
if (IS_ERR(em)) {
em = NULL;
test_msg("Got an error when we shouldn't have\n");
* setup_file_extents, so if you change anything there you need to
* update the comment and update the expected values below.
*/
- setup_file_extents(root);
+ setup_file_extents(root, sectorsize);
em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0);
if (IS_ERR(em)) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected an inline, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4091) {
+
+ if (em->start != offset || em->len != (sectorsize - 5)) {
test_msg("Unexpected extent wanted start %llu len 1, got start "
"%llu len %llu\n", offset, em->start, em->len);
goto out;
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
free_extent_map(em);
/* Regular extent */
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4095) {
+ if (em->start != offset || em->len != sectorsize - 1) {
test_msg("Unexpected extent wanted start %llu len 4095, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
free_extent_map(em);
/* The next 3 are split extents */
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a hole, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 8192) {
- test_msg("Unexpected extent wanted start %llu len 8192, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != 2 * sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, 2 * sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
free_extent_map(em);
/* Prealloc extent */
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != prealloc_only) {
free_extent_map(em);
/* The next 3 are a half written prealloc extent */
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != prealloc_only) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 8192) {
- test_msg("Unexpected extent wanted start %llu len 8192, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != 2 * sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, 2 * sectorsize, em->start, em->len);
goto out;
}
if (em->flags != prealloc_only) {
free_extent_map(em);
/* Now for the compressed extent */
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 8192) {
- test_msg("Unexpected extent wanted start %llu len 8192, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != 2 * sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u,"
+ "got start %llu len %llu\n",
+ offset, 2 * sectorsize, em->start, em->len);
goto out;
}
if (em->flags != compressed_only) {
free_extent_map(em);
/* Split compressed extent */
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u,"
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != compressed_only) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
disk_bytenr, em->block_start);
goto out;
}
- if (em->start != offset || em->len != 8192) {
- test_msg("Unexpected extent wanted start %llu len 8192, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != 2 * sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, 2 * sectorsize, em->start, em->len);
goto out;
}
if (em->flags != compressed_only) {
free_extent_map(em);
/* A hole between regular extents but no hole extent */
- em = btrfs_get_extent(inode, NULL, 0, offset + 6, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset + 6, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
* length of the actual hole, if this changes we'll have to change this
* test.
*/
- if (em->start != offset || em->len != 12288) {
- test_msg("Unexpected extent wanted start %llu len 12288, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != 3 * sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, 3 * sectorsize, em->start, em->len);
goto out;
}
if (em->flags != vacancy_only) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u,"
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
return ret;
}
-static int test_hole_first(void)
+static int test_hole_first(u32 sectorsize, u32 nodesize)
{
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;
goto out;
}
- root->node = alloc_dummy_extent_buffer(NULL, 4096);
+ root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
goto out;
* btrfs_get_extent.
*/
insert_inode_item_key(root);
- insert_extent(root, 4096, 4096, 4096, 0, 4096, 4096,
- BTRFS_FILE_EXTENT_REG, 0, 1);
- em = btrfs_get_extent(inode, NULL, 0, 0, 8192, 0);
+ insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize,
+ sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1);
+ em = btrfs_get_extent(inode, NULL, 0, 0, 2 * sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a hole, got %llu\n", em->block_start);
goto out;
}
- if (em->start != 0 || em->len != 4096) {
- test_msg("Unexpected extent wanted start 0 len 4096, got start "
- "%llu len %llu\n", em->start, em->len);
+ if (em->start != 0 || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start 0 len %u, "
+ "got start %llu len %llu\n",
+ sectorsize, em->start, em->len);
goto out;
}
if (em->flags != vacancy_only) {
}
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, 4096, 8192, 0);
+ em = btrfs_get_extent(inode, NULL, 0, sectorsize, 2 * sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
- if (em->block_start != 4096) {
+ if (em->block_start != sectorsize) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != 4096 || em->len != 4096) {
- test_msg("Unexpected extent wanted start 4096 len 4096, got "
- "start %llu len %llu\n", em->start, em->len);
+ if (em->start != sectorsize || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %u len %u, "
+ "got start %llu len %llu\n",
+ sectorsize, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
return ret;
}
-static int test_extent_accounting(void)
+static int test_extent_accounting(u32 sectorsize, u32 nodesize)
{
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
return ret;
}
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;
goto out;
}
- /* [BTRFS_MAX_EXTENT_SIZE][4k] */
+ /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
BTRFS_I(inode)->outstanding_extents++;
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
- BTRFS_MAX_EXTENT_SIZE + 4095, NULL);
+ BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
+ NULL);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
goto out;
}
- /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */
+ /* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */
ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
BTRFS_MAX_EXTENT_SIZE >> 1,
- (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
+ (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
EXTENT_DELALLOC | EXTENT_DIRTY |
EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0,
NULL, GFP_KERNEL);
goto out;
}
- /* [BTRFS_MAX_EXTENT_SIZE][4K] */
+ /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
BTRFS_I(inode)->outstanding_extents++;
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
- (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
+ (BTRFS_MAX_EXTENT_SIZE >> 1)
+ + sectorsize - 1,
NULL);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
}
/*
- * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K]
+ * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize]
*
* I'm artificially adding 2 to outstanding_extents because in the
* buffered IO case we'd add things up as we go, but I don't feel like
* doing that here, this isn't the interesting case we want to test.
*/
BTRFS_I(inode)->outstanding_extents += 2;
- ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192,
- (BTRFS_MAX_EXTENT_SIZE << 1) + 12287,
- NULL);
+ ret = btrfs_set_extent_delalloc(inode,
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
+ (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
+ NULL);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
goto out;
}
- /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */
+ /*
+ * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize][BTRFS_MAX_EXTENT_SIZE+sectorsize]
+ */
BTRFS_I(inode)->outstanding_extents++;
- ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
- BTRFS_MAX_EXTENT_SIZE+8191, NULL);
+ ret = btrfs_set_extent_delalloc(inode,
+ BTRFS_MAX_EXTENT_SIZE + sectorsize,
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
/* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */
ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
- BTRFS_MAX_EXTENT_SIZE+4096,
- BTRFS_MAX_EXTENT_SIZE+8191,
+ BTRFS_MAX_EXTENT_SIZE + sectorsize,
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
NULL, GFP_KERNEL);
* might fail and I'd rather satisfy my paranoia at this point.
*/
BTRFS_I(inode)->outstanding_extents++;
- ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
- BTRFS_MAX_EXTENT_SIZE+8191, NULL);
+ ret = btrfs_set_extent_delalloc(inode,
+ BTRFS_MAX_EXTENT_SIZE + sectorsize,
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
return ret;
}
-int btrfs_test_inodes(void)
+int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
{
int ret;
set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only);
test_msg("Running btrfs_get_extent tests\n");
- ret = test_btrfs_get_extent();
+ ret = test_btrfs_get_extent(sectorsize, nodesize);
if (ret)
return ret;
test_msg("Running hole first btrfs_get_extent test\n");
- ret = test_hole_first();
+ ret = test_hole_first(sectorsize, nodesize);
if (ret)
return ret;
test_msg("Running outstanding_extents tests\n");
- return test_extent_accounting();
+ return test_extent_accounting(sectorsize, nodesize);
}
* Boston, MA 021110-1307, USA.
*/
+#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../transaction.h"
return ret;
}
-static int test_no_shared_qgroup(struct btrfs_root *root)
+static int test_no_shared_qgroup(struct btrfs_root *root,
+ u32 sectorsize, u32 nodesize)
{
struct btrfs_trans_handle trans;
struct btrfs_fs_info *fs_info = root->fs_info;
btrfs_init_dummy_trans(&trans);
test_msg("Qgroup basic add\n");
- ret = btrfs_create_qgroup(NULL, fs_info, 5);
+ ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FS_TREE_OBJECTID);
if (ret) {
test_msg("Couldn't create a qgroup %d\n", ret);
return ret;
* we can only call btrfs_qgroup_account_extent() directly to test
* quota.
*/
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
return ret;
}
- ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5);
+ ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+ BTRFS_FS_TREE_OBJECTID);
if (ret)
return ret;
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
- old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+ nodesize, old_roots, new_roots);
if (ret) {
test_msg("Couldn't account space for a qgroup %d\n", ret);
return ret;
}
- if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+ nodesize, nodesize)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
old_roots = NULL;
new_roots = NULL;
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
return ret;
}
- ret = remove_extent_item(root, 4096, 4096);
+ ret = remove_extent_item(root, nodesize, nodesize);
if (ret)
return -EINVAL;
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
- old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+ nodesize, old_roots, new_roots);
if (ret) {
test_msg("Couldn't account space for a qgroup %d\n", ret);
return -EINVAL;
}
- if (btrfs_verify_qgroup_counts(fs_info, 5, 0, 0)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, 0, 0)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
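
For reference, the named constants substituted throughout these qgroup tests
carry the same values as the old magic numbers (values from ctree.h):

	/*
	 * BTRFS_FS_TREE_OBJECTID    == 5   (the default subvolume tree)
	 * BTRFS_FIRST_FREE_OBJECTID == 256 (first objectid available to
	 *                                   user-created items)
	 * Behaviour is unchanged; the numbers just gained names.
	 */
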
* right, also remove one of the roots and make sure the exclusive count is
* adjusted properly.
*/
-static int test_multiple_refs(struct btrfs_root *root)
+static int test_multiple_refs(struct btrfs_root *root,
+ u32 sectorsize, u32 nodesize)
{
struct btrfs_trans_handle trans;
struct btrfs_fs_info *fs_info = root->fs_info;
test_msg("Qgroup multiple refs test\n");
- /* We have 5 created already from the previous test */
- ret = btrfs_create_qgroup(NULL, fs_info, 256);
+ /*
+ * We have BTRFS_FS_TREE_OBJECTID created already from the
+ * previous test.
+ */
+ ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FIRST_FREE_OBJECTID);
if (ret) {
test_msg("Couldn't create a qgroup %d\n", ret);
return ret;
}
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
return ret;
}
- ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5);
+ ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+ BTRFS_FS_TREE_OBJECTID);
if (ret)
return ret;
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
- old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+ nodesize, old_roots, new_roots);
if (ret) {
test_msg("Couldn't account space for a qgroup %d\n", ret);
return ret;
}
- if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+ nodesize, nodesize)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
return ret;
}
- ret = add_tree_ref(root, 4096, 4096, 0, 256);
+ ret = add_tree_ref(root, nodesize, nodesize, 0,
+ BTRFS_FIRST_FREE_OBJECTID);
if (ret)
return ret;
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
- old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+ nodesize, old_roots, new_roots);
if (ret) {
test_msg("Couldn't account space for a qgroup %d\n", ret);
return ret;
}
- if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 0)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+ nodesize, 0)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
- if (btrfs_verify_qgroup_counts(fs_info, 256, 4096, 0)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
+ nodesize, 0)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
return ret;
}
- ret = remove_extent_ref(root, 4096, 4096, 0, 256);
+ ret = remove_extent_ref(root, nodesize, nodesize, 0,
+ BTRFS_FIRST_FREE_OBJECTID);
if (ret)
return ret;
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
- old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+ nodesize, old_roots, new_roots);
if (ret) {
test_msg("Couldn't account space for a qgroup %d\n", ret);
return ret;
}
- if (btrfs_verify_qgroup_counts(fs_info, 256, 0, 0)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
+ 0, 0)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
- if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+ nodesize, nodesize)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
return 0;
}
-int btrfs_test_qgroups(void)
+int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
{
struct btrfs_root *root;
struct btrfs_root *tmp_root;
int ret = 0;
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
return PTR_ERR(root);
* Can't use bytenr 0, some things freak out
* *cough*backref walking code*cough*
*/
- root->node = alloc_test_extent_buffer(root->fs_info, 4096);
+ root->node = alloc_test_extent_buffer(root->fs_info, nodesize,
+ nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
ret = -ENOMEM;
}
btrfs_set_header_level(root->node, 0);
btrfs_set_header_nritems(root->node, 0);
- root->alloc_bytenr += 8192;
+ root->alloc_bytenr += 2 * nodesize;
- tmp_root = btrfs_alloc_dummy_root();
+ tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);
goto out;
}
- tmp_root->root_key.objectid = 5;
+ tmp_root->root_key.objectid = BTRFS_FS_TREE_OBJECTID;
root->fs_info->fs_root = tmp_root;
ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
if (ret) {
goto out;
}
- tmp_root = btrfs_alloc_dummy_root();
+ tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);
goto out;
}
- tmp_root->root_key.objectid = 256;
+ tmp_root->root_key.objectid = BTRFS_FIRST_FREE_OBJECTID;
ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
if (ret) {
test_msg("Couldn't insert fs root %d\n", ret);
}
test_msg("Running qgroup tests\n");
- ret = test_no_shared_qgroup(root);
+ ret = test_no_shared_qgroup(root, sectorsize, nodesize);
if (ret)
goto out;
- ret = test_multiple_refs(root);
+ ret = test_multiple_refs(root, sectorsize, nodesize);
out:
btrfs_free_dummy_root(root);
return ret;
{
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_fs_info *info = root->fs_info;
+ u64 transid = trans->transid; /* stashed: trans is freed before the async delayed-ref run below */
unsigned long cur = trans->delayed_ref_updates;
int lock = (trans->type != TRANS_JOIN_NOLOCK);
int err = 0;
kmem_cache_free(btrfs_trans_handle_cachep, trans);
if (must_run_delayed_refs) {
- btrfs_async_run_delayed_refs(root, cur,
+ btrfs_async_run_delayed_refs(root, cur, transid,
must_run_delayed_refs == 1);
}
return err;
return ret;
}
-/* Bisesctability fixup, remove in 4.8 */
-#ifndef btrfs_std_error
-#define btrfs_std_error btrfs_handle_fs_error
-#endif
-
/*
* Do all special snapshot related qgroup dirty hack.
*
switch_commit_roots(trans->transaction, fs_info);
ret = btrfs_write_and_wait_transaction(trans, src);
if (ret)
- btrfs_std_error(fs_info, ret,
+ btrfs_handle_fs_error(fs_info, ret,
"Error while writing out transaction for qgroup");
out:
u64 chunk_bytes_reserved;
unsigned long use_count;
unsigned long blocks_reserved;
- unsigned long blocks_used;
unsigned long delayed_ref_updates;
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
bool can_flush_pending_bgs;
bool reloc_reserved;
bool sync;
+ bool dirty;
unsigned int type;
/*
* this root is only needed to validate that the root passed to
root_owner = btrfs_header_owner(parent);
next = btrfs_find_create_tree_block(root, bytenr);
- if (!next)
- return -ENOMEM;
+ if (IS_ERR(next))
+ return PTR_ERR(next);
if (*level == 1) {
ret = wc->process_func(root, next, wc, ptr_gen);
u64 dev_extent_len = 0;
u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
int i, ret = 0;
+ struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
/* Just in case */
root = root->fs_info->chunk_root;
check_system_chunk(trans, extent_root, map->type);
unlock_chunks(root->fs_info->chunk_root);
+ /*
+ * Take the device list mutex to prevent races with the final phase of
+ * a device replace operation that replaces the device object associated
+ * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
+ */
+ mutex_lock(&fs_devices->device_list_mutex);
for (i = 0; i < map->num_stripes; i++) {
struct btrfs_device *device = map->stripes[i].dev;
ret = btrfs_free_dev_extent(trans, device,
map->stripes[i].physical,
&dev_extent_len);
if (ret) {
+ mutex_unlock(&fs_devices->device_list_mutex);
btrfs_abort_transaction(trans, root, ret);
goto out;
}
if (map->stripes[i].dev) {
ret = btrfs_update_device(trans, map->stripes[i].dev);
if (ret) {
+ mutex_unlock(&fs_devices->device_list_mutex);
btrfs_abort_transaction(trans, root, ret);
goto out;
}
}
}
+ mutex_unlock(&fs_devices->device_list_mutex);
+
ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
if (IS_ERR(uuid_root)) {
ret = PTR_ERR(uuid_root);
btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_end_transaction(trans, tree_root);
return ret;
}
if (type & BTRFS_BLOCK_GROUP_RAID5) {
raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
- btrfs_super_stripesize(info->super_copy));
+ extent_root->stripesize);
data_stripes = num_stripes - 1;
}
if (type & BTRFS_BLOCK_GROUP_RAID6) {
raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
- btrfs_super_stripesize(info->super_copy));
+ extent_root->stripesize);
data_stripes = num_stripes - 2;
}
}
}
if (found) {
- if (physical_of_found + map->stripe_len <=
- dev_replace->cursor_left) {
- struct btrfs_bio_stripe *tgtdev_stripe =
- bbio->stripes + num_stripes;
+ struct btrfs_bio_stripe *tgtdev_stripe =
+ bbio->stripes + num_stripes;
- tgtdev_stripe->physical = physical_of_found;
- tgtdev_stripe->length =
- bbio->stripes[index_srcdev].length;
- tgtdev_stripe->dev = dev_replace->tgtdev;
- bbio->tgtdev_map[index_srcdev] = num_stripes;
+ tgtdev_stripe->physical = physical_of_found;
+ tgtdev_stripe->length =
+ bbio->stripes[index_srcdev].length;
+ tgtdev_stripe->dev = dev_replace->tgtdev;
+ bbio->tgtdev_map[index_srcdev] = num_stripes;
- tgtdev_indexes++;
- num_stripes++;
- }
+ tgtdev_indexes++;
+ num_stripes++;
}
}
return dev;
}
-static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
- struct extent_buffer *leaf,
- struct btrfs_chunk *chunk)
+/* Return -EIO if any error, otherwise return 0. */
+static int btrfs_check_chunk_valid(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_chunk *chunk, u64 logical)
{
- struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
- struct map_lookup *map;
- struct extent_map *em;
- u64 logical;
u64 length;
u64 stripe_len;
- u64 devid;
- u8 uuid[BTRFS_UUID_SIZE];
- int num_stripes;
- int ret;
- int i;
+ u16 num_stripes;
+ u16 sub_stripes;
+ u64 type;
- logical = key->offset;
length = btrfs_chunk_length(leaf, chunk);
stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
- /* Validation check */
+ sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+ type = btrfs_chunk_type(leaf, chunk);
+
if (!num_stripes) {
btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
num_stripes);
"invalid chunk logical %llu", logical);
return -EIO;
}
+ if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
+ btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
+ btrfs_chunk_sector_size(leaf, chunk));
+ return -EIO;
+ }
if (!length || !IS_ALIGNED(length, root->sectorsize)) {
btrfs_err(root->fs_info,
"invalid chunk length %llu", length);
return -EIO;
}
if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
- btrfs_chunk_type(leaf, chunk)) {
+ type) {
btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
~(BTRFS_BLOCK_GROUP_TYPE_MASK |
BTRFS_BLOCK_GROUP_PROFILE_MASK) &
btrfs_chunk_type(leaf, chunk));
return -EIO;
}
+ if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
+ (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
+ (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
+ ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
+ num_stripes != 1)) {
+ btrfs_err(root->fs_info,
+ "invalid num_stripes:sub_stripes %u:%u for profile %llu",
+ num_stripes, sub_stripes,
+ type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
+ struct extent_buffer *leaf,
+ struct btrfs_chunk *chunk)
+{
+ struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+ struct map_lookup *map;
+ struct extent_map *em;
+ u64 logical;
+ u64 length;
+ u64 stripe_len;
+ u64 devid;
+ u8 uuid[BTRFS_UUID_SIZE];
+ int num_stripes;
+ int ret;
+ int i;
+
+ logical = key->offset;
+ length = btrfs_chunk_length(leaf, chunk);
+ stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+ num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+
+ ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
+ if (ret)
+ return ret;
read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
u32 array_size;
u32 len = 0;
u32 cur_offset;
+ u64 type;
struct btrfs_key key;
ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
* overallocate but we can keep it as-is, only the first page is used.
*/
sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
- if (!sb)
- return -ENOMEM;
+ if (IS_ERR(sb))
+ return PTR_ERR(sb);
set_extent_buffer_uptodate(sb);
btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
/*
break;
}
+ type = btrfs_chunk_type(sb, chunk);
+ if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
+ btrfs_err(root->fs_info,
+ "invalid chunk type %llu in sys_array at offset %u",
+ type, cur_offset);
+ ret = -EIO;
+ break;
+ }
+
len = btrfs_chunk_item_size(num_stripes);
if (cur_offset + len > array_size)
goto out_short_read;
sb_array_offset += len;
cur_offset += len;
}
+ clear_extent_buffer_uptodate(sb);
free_extent_buffer_stale(sb);
return ret;
out_short_read:
printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
len, cur_offset);
+ clear_extent_buffer_uptodate(sb);
free_extent_buffer_stale(sb);
return -EIO;
}
struct btrfs_key found_key;
int ret;
int slot;
+ u64 total_dev = 0;
root = root->fs_info->chunk_root;
ret = read_one_dev(root, leaf, dev_item);
if (ret)
goto error;
+ total_dev++;
} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
struct btrfs_chunk *chunk;
chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
}
path->slots[0]++;
}
+
+ /*
+ * After loading chunk tree, we've got all device information,
+ * do another round of validation checks.
+ */
+ if (total_dev != root->fs_info->fs_devices->total_devices) {
+ btrfs_err(root->fs_info,
+ "super_num_devices %llu mismatch with num_devices %llu found here",
+ btrfs_super_num_devices(root->fs_info->super_copy),
+ total_dev);
+ ret = -EINVAL;
+ goto error;
+ }
+ if (btrfs_super_total_bytes(root->fs_info->super_copy) <
+ root->fs_info->fs_devices->total_rw_bytes) {
+ btrfs_err(root->fs_info,
+ "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
+ btrfs_super_total_bytes(root->fs_info->super_copy),
+ root->fs_info->fs_devices->total_rw_bytes);
+ ret = -EINVAL;
+ goto error;
+ }
ret = 0;
error:
unlock_chunks(root);
* check if the backing cache is updated to FS-Cache
* - called by FS-Cache when evaluates if need to invalidate the cache
*/
-static bool cachefiles_check_consistency(struct fscache_operation *op)
+static int cachefiles_check_consistency(struct fscache_operation *op)
{
struct cachefiles_object *object;
struct cachefiles_cache *cache;
for (i = 0; i < num_pages; i++) {
struct page *page = osd_data->pages[i];
- if (rc < 0 && rc != -ENOENT)
+ if (rc < 0 && rc != -ENOENT) {
+ ceph_fscache_readpage_cancel(inode, page);
goto unlock;
+ }
if (bytes < (int)PAGE_SIZE) {
/* zero (remainder of) page */
int s = bytes < 0 ? 0 : bytes;
CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);
- ceph_readpage_to_fscache(inode, page);
-
set_page_writeback(page);
err = ceph_osdc_writepages(osdc, ceph_vino(inode),
&ci->i_layout, snapc,
#include "cache.h"
struct ceph_aux_inode {
+ u64 version; /* inode version; catches changes that leave size and mtime intact */
struct timespec mtime;
loff_t size;
};
fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
&ceph_fscache_fsid_object_def,
fsc, true);
-
- if (fsc->fscache == NULL) {
+ if (!fsc->fscache)
pr_err("Unable to register fsid: %p fscache cookie", fsc);
- return 0;
- }
-
- fsc->revalidate_wq = alloc_workqueue("ceph-revalidate", 0, 1);
- if (fsc->revalidate_wq == NULL)
- return -ENOMEM;
return 0;
}
const struct inode* inode = &ci->vfs_inode;
memset(&aux, 0, sizeof(aux));
+ aux.version = ci->i_version;
aux.mtime = inode->i_mtime;
aux.size = i_size_read(inode);
return FSCACHE_CHECKAUX_OBSOLETE;
memset(&aux, 0, sizeof(aux));
+ aux.version = ci->i_version;
aux.mtime = inode->i_mtime;
aux.size = i_size_read(inode);
.now_uncached = ceph_fscache_inode_now_uncached,
};
-void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
- struct ceph_inode_info* ci)
+void ceph_fscache_register_inode_cookie(struct inode *inode)
{
- struct inode* inode = &ci->vfs_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
/* No caching for filesystem */
if (fsc->fscache == NULL)
return;
/* Only cache for regular files that are read only */
- if ((ci->vfs_inode.i_mode & S_IFREG) == 0)
+ if (!S_ISREG(inode->i_mode))
return;
- /* Avoid multiple racing open requests */
- inode_lock(inode);
-
- if (ci->fscache)
- goto done;
-
- ci->fscache = fscache_acquire_cookie(fsc->fscache,
- &ceph_fscache_inode_object_def,
- ci, true);
- fscache_check_consistency(ci->fscache);
-done:
+ inode_lock_nested(inode, I_MUTEX_CHILD);
+ if (!ci->fscache) {
+ ci->fscache = fscache_acquire_cookie(fsc->fscache,
+ &ceph_fscache_inode_object_def,
+ ci, false);
+ }
inode_unlock(inode);
-
}
void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
fscache_relinquish_cookie(cookie, 0);
}
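+/* fscache may only be enabled while nobody holds the inode open for write */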
+static bool ceph_fscache_can_enable(void *data)
+{
+ struct inode *inode = data;
+ return !inode_is_open_for_write(inode);
+}
+
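+/*
+ * Called on every open: a writable open anywhere on the inode disables
+ * the cookie and drops cached pages; otherwise try to (re)enable it.
+ */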
+void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ if (!fscache_cookie_valid(ci->fscache))
+ return;
+
+ if (inode_is_open_for_write(inode)) {
+ dout("fscache_file_set_cookie %p %p disabling cache\n",
+ inode, filp);
+ fscache_disable_cookie(ci->fscache, false);
+ fscache_uncache_all_inode_pages(ci->fscache, inode);
+ } else {
+ fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable,
+ inode);
+ if (fscache_cookie_enabled(ci->fscache)) {
+ dout("fscache_file_set_cookie %p %p enabing cache\n",
+ inode, filp);
+ }
+ }
+}
+
static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
{
if (!error)
static inline bool cache_valid(struct ceph_inode_info *ci)
{
- return ((ceph_caps_issued(ci) & CEPH_CAP_FILE_CACHE) &&
- (ci->i_fscache_gen == ci->i_rdcache_gen));
+ return ci->i_fscache_gen == ci->i_rdcache_gen;
}
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
- if (fsc->revalidate_wq)
- destroy_workqueue(fsc->revalidate_wq);
-
fscache_relinquish_cookie(fsc->fscache, 0);
fsc->fscache = NULL;
}
-static void ceph_revalidate_work(struct work_struct *work)
-{
- int issued;
- u32 orig_gen;
- struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
- i_revalidate_work);
- struct inode *inode = &ci->vfs_inode;
-
- spin_lock(&ci->i_ceph_lock);
- issued = __ceph_caps_issued(ci, NULL);
- orig_gen = ci->i_rdcache_gen;
- spin_unlock(&ci->i_ceph_lock);
-
- if (!(issued & CEPH_CAP_FILE_CACHE)) {
- dout("revalidate_work lost cache before validation %p\n",
- inode);
- goto out;
- }
-
- if (!fscache_check_consistency(ci->fscache))
- fscache_invalidate(ci->fscache);
-
- spin_lock(&ci->i_ceph_lock);
- /* Update the new valid generation (backwards sanity check too) */
- if (orig_gen > ci->i_fscache_gen) {
- ci->i_fscache_gen = orig_gen;
- }
- spin_unlock(&ci->i_ceph_lock);
-
-out:
- iput(&ci->vfs_inode);
-}
-
-void ceph_queue_revalidate(struct inode *inode)
+/*
+ * caller should hold CEPH_CAP_FILE_{RD,CACHE}
+ */
+void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
- struct ceph_inode_info *ci = ceph_inode(inode);
-
- if (fsc->revalidate_wq == NULL || ci->fscache == NULL)
+ if (cache_valid(ci))
return;
- ihold(inode);
-
- if (queue_work(ceph_sb_to_client(inode->i_sb)->revalidate_wq,
- &ci->i_revalidate_work)) {
- dout("ceph_queue_revalidate %p\n", inode);
- } else {
- dout("ceph_queue_revalidate %p failed\n)", inode);
- iput(inode);
+ /* reuse i_truncate_mutex. There should be no pending
+ * truncate while the caller holds CEPH_CAP_FILE_RD */
+ mutex_lock(&ci->i_truncate_mutex);
+ if (!cache_valid(ci)) {
+ if (fscache_check_consistency(ci->fscache))
+ fscache_invalidate(ci->fscache);
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_fscache_gen = ci->i_rdcache_gen;
+ spin_unlock(&ci->i_ceph_lock);
}
-}
-
-void ceph_fscache_inode_init(struct ceph_inode_info *ci)
-{
- ci->fscache = NULL;
- /* The first load is verifed cookie open time */
- ci->i_fscache_gen = 1;
- INIT_WORK(&ci->i_revalidate_work, ceph_revalidate_work);
+ mutex_unlock(&ci->i_truncate_mutex);
}
int ceph_fscache_register_fs(struct ceph_fs_client* fsc);
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc);
-void ceph_fscache_inode_init(struct ceph_inode_info *ci);
-void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
- struct ceph_inode_info* ci);
+void ceph_fscache_register_inode_cookie(struct inode *inode);
void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci);
+void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp);
+void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci);
int ceph_readpage_from_fscache(struct inode *inode, struct page *page);
int ceph_readpages_from_fscache(struct inode *inode,
unsigned *nr_pages);
void ceph_readpage_to_fscache(struct inode *inode, struct page *page);
void ceph_invalidate_fscache_page(struct inode* inode, struct page *page);
-void ceph_queue_revalidate(struct inode *inode);
-static inline void ceph_fscache_update_objectsize(struct inode *inode)
+static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
{
- struct ceph_inode_info *ci = ceph_inode(inode);
- fscache_attr_changed(ci->fscache);
+ ci->fscache = NULL;
+ ci->i_fscache_gen = 0;
}
static inline void ceph_fscache_invalidate(struct inode *inode)
return fscache_readpages_cancel(ci->fscache, pages);
}
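+/*
+ * Desync i_fscache_gen from i_rdcache_gen so cache_valid() fails and
+ * reads bypass fscache until the cookie is revalidated.
+ */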
+static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
+{
+ ci->i_fscache_gen = ci->i_rdcache_gen - 1;
+}
+
#else
static inline int ceph_fscache_register(void)
{
}
-static inline void ceph_fscache_register_inode_cookie(struct ceph_fs_client* parent_fsc,
- struct ceph_inode_info* ci)
+static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
+{
+}
+
+static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+{
+}
+
+static inline void ceph_fscache_file_set_cookie(struct inode *inode,
+ struct file *filp)
+{
+}
+
+static inline void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
{
}
{
}
-static inline void ceph_fscache_update_objectsize(struct inode *inode)
-{
-}
-
static inline void ceph_fscache_invalidate(struct inode *inode)
{
}
{
}
-static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
-{
-}
-
static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp)
{
return 1;
{
}
-static inline void ceph_queue_revalidate(struct inode *inode)
+static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
{
}
snap_rwsem_locked = true;
}
*got = need | (have & want);
+ if ((need & CEPH_CAP_FILE_RD) &&
+ !(*got & CEPH_CAP_FILE_CACHE))
+ ceph_disable_fscache_readpage(ci);
__take_cap_refs(ci, *got, true);
ret = 1;
}
break;
}
+ if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE))
+ ceph_fscache_revalidate_cookie(ci);
+
*got = _got;
return 0;
}
bool writeback = false;
bool queue_trunc = false;
bool queue_invalidate = false;
- bool queue_revalidate = false;
bool deleted_inode = false;
bool fill_inline = false;
ci->i_rdcache_revoking = ci->i_rdcache_gen;
}
}
-
- ceph_fscache_invalidate(inode);
}
/* side effects now are allowed */
}
}
- /* Do we need to revalidate our fscache cookie. Don't bother on the
- * first cache cap as we already validate at cookie creation time. */
- if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1)
- queue_revalidate = true;
-
if (newcaps & CEPH_CAP_ANY_RD) {
/* ctime/mtime/atime? */
ceph_decode_timespec(&mtime, &grant->mtime);
if (fill_inline)
ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
- if (queue_trunc) {
+ if (queue_trunc)
ceph_queue_vmtruncate(inode);
- ceph_queue_revalidate(inode);
- } else if (queue_revalidate)
- ceph_queue_revalidate(inode);
if (writeback)
/*
truncate_seq, truncate_size, size);
spin_unlock(&ci->i_ceph_lock);
- if (queue_trunc) {
+ if (queue_trunc)
ceph_queue_vmtruncate(inode);
- ceph_fscache_invalidate(inode);
- }
}
/*
}
dentry = d_obtain_alias(inode);
- if (IS_ERR(dentry)) {
- iput(inode);
+ if (IS_ERR(dentry))
return dentry;
- }
err = ceph_init_dentry(dentry);
if (err < 0) {
dput(dentry);
return ERR_PTR(-ENOENT);
dentry = d_obtain_alias(inode);
- if (IS_ERR(dentry)) {
- iput(inode);
+ if (IS_ERR(dentry))
return dentry;
- }
err = ceph_init_dentry(dentry);
if (err < 0) {
dput(dentry);
dout("fh_to_parent %llx\n", cfh->parent_ino);
dentry = __get_parent(sb, NULL, cfh->ino);
- if (IS_ERR(dentry) && PTR_ERR(dentry) == -ENOENT)
+ if (unlikely(dentry == ERR_PTR(-ENOENT)))
dentry = __fh_to_dentry(sb, cfh->parent_ino);
return dentry;
}
{
struct ceph_file_info *cf;
int ret = 0;
- struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
- struct ceph_mds_client *mdsc = fsc->mdsc;
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
- /* First file open request creates the cookie, we want to keep
- * this cookie around for the filetime of the inode as not to
- * have to worry about fscache register / revoke / operation
- * races.
- *
- * Also, if we know the operation is going to invalidate data
- * (non readonly) just nuke the cache right away.
- */
- ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
- if ((fmode & CEPH_FILE_MODE_WR))
- ceph_fscache_invalidate(inode);
+ ceph_fscache_register_inode_cookie(inode);
+ ceph_fscache_file_set_cookie(inode, file);
case S_IFDIR:
dout("init_file %p %p 0%o (regular)\n", inode, file,
inode->i_mode);
if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
- if (d_unhashed(dentry)) {
+ if (d_in_lookup(dentry)) {
dn = ceph_finish_lookup(req, dentry, err);
if (IS_ERR(dn))
err = PTR_ERR(dn);
}
retry_snap:
- if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
+ if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
err = -ENOSPC;
goto out;
}
iov_iter_advance(from, written);
ceph_put_snap_context(snapc);
} else {
- loff_t old_size = i_size_read(inode);
/*
* No need to acquire the i_truncate_mutex. Because
* the MDS revokes Fwb caps before sending truncate
written = generic_perform_write(file, from, pos);
if (likely(written >= 0))
iocb->ki_pos = pos + written;
- if (i_size_read(inode) > old_size)
- ceph_fscache_update_objectsize(inode);
inode_unlock(inode);
}
ceph_put_cap_refs(ci, got);
if (written >= 0) {
- if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))
+ if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
iocb->ki_flags |= IOCB_DSYNC;
written = generic_write_sync(iocb, written);
goto unlock;
}
- if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
- !(mode & FALLOC_FL_PUNCH_HOLE)) {
+ if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
+ !(mode & FALLOC_FL_PUNCH_HOLE)) {
ret = -ENOSPC;
goto unlock;
}
#ifdef CONFIG_CEPH_FSCACHE
struct fscache_cookie *fscache;
- struct workqueue_struct *revalidate_wq;
#endif
};
#ifdef CONFIG_CEPH_FSCACHE
struct fscache_cookie *fscache;
- u32 i_fscache_gen; /* sequence, for delayed fscache validate */
- struct work_struct i_revalidate_work;
+ u32 i_fscache_gen;
#endif
struct inode vfs_inode; /* at end */
};
case SFM_SLASH:
*target = '\\';
break;
+ case SFM_SPACE:
+ *target = ' ';
+ break;
+ case SFM_PERIOD:
+ *target = '.';
+ break;
default:
return false;
}
return dest_char;
}
-static __le16 convert_to_sfm_char(char src_char)
+static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
{
__le16 dest_char;
case '|':
dest_char = cpu_to_le16(SFM_PIPE);
break;
+ case '.':
+ if (end_of_string)
+ dest_char = cpu_to_le16(SFM_PERIOD);
+ else
+ dest_char = 0;
+ break;
+ case ' ':
+ if (end_of_string)
+ dest_char = cpu_to_le16(SFM_SPACE);
+ else
+ dest_char = 0;
+ break;
default:
dest_char = 0;
}
/* see if we must remap this char */
if (map_chars == SFU_MAP_UNI_RSVD)
dst_char = convert_to_sfu_char(src_char);
- else if (map_chars == SFM_MAP_UNI_RSVD)
- dst_char = convert_to_sfm_char(src_char);
- else
+ else if (map_chars == SFM_MAP_UNI_RSVD) {
+ bool end_of_string = (i == srclen - 1);
+
+ dst_char = convert_to_sfm_char(src_char, end_of_string);
+ } else
dst_char = 0;
/*
* FIXME: We can not handle remapping backslash (UNI_SLASH)
#define SFM_LESSTHAN ((__u16) 0xF023)
#define SFM_PIPE ((__u16) 0xF027)
#define SFM_SLASH ((__u16) 0xF026)
+#define SFM_PERIOD ((__u16) 0xF028)
+#define SFM_SPACE ((__u16) 0xF029)
/*
* Mapping mechanism to use when one of the seven reserved characters is
extern mempool_t *cifs_mid_poolp;
struct workqueue_struct *cifsiod_wq;
+__u32 cifs_lock_secret;
/*
* Bumps refcount for cifs super block.
spin_lock_init(&cifs_file_list_lock);
spin_lock_init(&GlobalMid_Lock);
+ get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
+
if (cifs_max_pending < 2) {
cifs_max_pending = 2;
cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
extern const struct slow_work_ops cifs_oplock_break_ops;
extern struct workqueue_struct *cifsiod_wq;
+extern __u32 cifs_lock_secret;
extern mempool_t *cifs_mid_poolp;
* server->ops->need_neg() == true. Also, no need to ping if
* we got a response recently.
*/
- if (!server->ops->need_neg || server->ops->need_neg(server) ||
+
+ if (server->tcpStatus == CifsNeedReconnect ||
+ server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
(server->ops->can_echo && !server->ops->can_echo(server)) ||
time_before(jiffies, server->lstrp + echo_interval - HZ))
goto requeue_echo;
* Check for hashed negative dentry. We have already revalidated
* the dentry and it is fine. No need to perform another lookup.
*/
- if (!d_unhashed(direntry))
+ if (!d_in_lookup(direntry))
return -ENOENT;
res = cifs_lookup(inode, direntry, 0);
return rc;
}
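+/*
+ * Hash the opaque fl_owner_t into a 32-bit lock owner id for the wire
+ * protocol; xoring with the boot-time random cifs_lock_secret avoids
+ * leaking the raw kernel pointer value to the server.
+ */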
+static __u32
+hash_lockowner(fl_owner_t owner)
+{
+ return cifs_lock_secret ^ hash32_ptr((const void *)owner);
+}
+
struct lock_to_push {
struct list_head llist;
__u64 offset;
else
type = CIFS_WRLCK;
lck = list_entry(el, struct lock_to_push, llist);
- lck->pid = flock->fl_pid;
+ lck->pid = hash_lockowner(flock->fl_owner);
lck->netfid = cfile->fid.netfid;
lck->length = length;
lck->type = type;
posix_lock_type = CIFS_RDLCK;
else
posix_lock_type = CIFS_WRLCK;
- rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
+ rc = CIFSSMBPosixLock(xid, tcon, netfid,
+ hash_lockowner(flock->fl_owner),
flock->fl_start, length, flock,
posix_lock_type, wait_flag);
return rc;
posix_lock_type = CIFS_UNLCK;
rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
- current->tgid, flock->fl_start, length,
+ hash_lockowner(flock->fl_owner),
+ flock->fl_start, length,
NULL, posix_lock_type, wait_flag);
goto out;
}
int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
-int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen,
+int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
struct cifs_ses *ses,
const struct nls_table *nls_cp);
sec_blob->DomainName.MaximumLength = 0;
}
-/* We do not malloc the blob, it is passed in pbuffer, because its
- maximum possible size is fixed and small, making this approach cleaner.
- This function returns the length of the data in the blob */
-int build_ntlmssp_auth_blob(unsigned char *pbuffer,
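+/*
+ * Worst-case size of the NTLMSSP authenticate blob: fixed header plus
+ * the NTLMv2 response, session key and UTF-16 domain, user and
+ * workstation names (two bytes each when empty).
+ */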
+static int size_of_ntlmssp_blob(struct cifs_ses *ses)
+{
+ int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
+ - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
+
+ if (ses->domainName)
+ sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ else
+ sz += 2;
+
+ if (ses->user_name)
+ sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
+ else
+ sz += 2;
+
+ return sz;
+}
+
+int build_ntlmssp_auth_blob(unsigned char **pbuffer,
u16 *buflen,
struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc;
- AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
+ AUTHENTICATE_MESSAGE *sec_blob;
__u32 flags;
unsigned char *tmp;
+ rc = setup_ntlmv2_rsp(ses, nls_cp);
+ if (rc) {
+ cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+ *buflen = 0;
+ goto setup_ntlmv2_ret;
+ }
+ *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
+ if (!*pbuffer) {
+ rc = -ENOMEM;
+ *buflen = 0;
+ goto setup_ntlmv2_ret;
+ }
+ sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
+
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
sec_blob->MessageType = NtLmAuthenticate;
flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
}
- tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
+ tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
sec_blob->NegotiateFlags = cpu_to_le32(flags);
sec_blob->LmChallengeResponse.BufferOffset =
sec_blob->LmChallengeResponse.Length = 0;
sec_blob->LmChallengeResponse.MaximumLength = 0;
- sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->NtChallengeResponse.BufferOffset =
+ cpu_to_le32(tmp - *pbuffer);
if (ses->user_name != NULL) {
- rc = setup_ntlmv2_rsp(ses, nls_cp);
- if (rc) {
- cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
- goto setup_ntlmv2_ret;
- }
memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
ses->auth_key.len - CIFS_SESS_KEY_SIZE);
tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
}
if (ses->domainName == NULL) {
- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->DomainName.Length = 0;
sec_blob->DomainName.MaximumLength = 0;
tmp += 2;
} else {
int len;
len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
- CIFS_MAX_USERNAME_LEN, nls_cp);
+ CIFS_MAX_DOMAINNAME_LEN, nls_cp);
len *= 2; /* unicode is 2 bytes each */
- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->DomainName.Length = cpu_to_le16(len);
sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
tmp += len;
}
if (ses->user_name == NULL) {
- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->UserName.Length = 0;
sec_blob->UserName.MaximumLength = 0;
tmp += 2;
len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
CIFS_MAX_USERNAME_LEN, nls_cp);
len *= 2; /* unicode is 2 bytes each */
- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->UserName.Length = cpu_to_le16(len);
sec_blob->UserName.MaximumLength = cpu_to_le16(len);
tmp += len;
}
- sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->WorkstationName.Length = 0;
sec_blob->WorkstationName.MaximumLength = 0;
tmp += 2;
(ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
&& !calc_seckey(ses)) {
memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
sec_blob->SessionKey.MaximumLength =
cpu_to_le16(CIFS_CPHTXT_SIZE);
tmp += CIFS_CPHTXT_SIZE;
} else {
- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->SessionKey.Length = 0;
sec_blob->SessionKey.MaximumLength = 0;
}
+ *buflen = tmp - *pbuffer;
setup_ntlmv2_ret:
- *buflen = tmp - pbuffer;
return rc;
}
rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
true : false, lnm_session_key);
+ if (rc)
+ goto out;
memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
bcc_ptr += CIFS_AUTH_RESP_SIZE;
struct cifs_ses *ses = sess_data->ses;
__u16 bytes_remaining;
char *bcc_ptr;
- char *ntlmsspblob = NULL;
+ unsigned char *ntlmsspblob = NULL;
u16 blob_len;
cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
/* Build security blob before we assemble the request */
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
smb_buf = (struct smb_hdr *)pSMB;
- /*
- * 5 is an empirical value, large enough to hold
- * authenticate message plus max 10 of av paris,
- * domain, user, workstation names, flags, etc.
- */
- ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
- GFP_KERNEL);
- if (!ntlmsspblob) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = build_ntlmssp_auth_blob(ntlmsspblob,
+ rc = build_ntlmssp_auth_blob(&ntlmsspblob,
&blob_len, ses, sess_data->nls_cp);
if (rc)
goto out_free_ntlmsspblob;
u16 blob_length = 0;
struct key *spnego_key = NULL;
char *security_blob = NULL;
- char *ntlmssp_blob = NULL;
+ unsigned char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */
cifs_dbg(FYI, "Session Setup\n");
iov[1].iov_len = blob_length;
} else if (phase == NtLmAuthenticate) {
req->hdr.SessionId = ses->Suid;
- ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
- GFP_KERNEL);
- if (ntlmssp_blob == NULL) {
- rc = -ENOMEM;
- goto ssetup_exit;
- }
- rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
+ rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
nls_cp);
if (rc) {
cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
cifs_dbg(FYI, "In echo request\n");
+ if (server->tcpStatus == CifsNeedNegotiate) {
+ struct list_head *tmp, *tmp2;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+
+ cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each(tmp, &server->smb_ses_list) {
+ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+ list_for_each(tmp2, &ses->tcon_list) {
+ tcon = list_entry(tmp2, struct cifs_tcon,
+ tcon_list);
+ /* add check for persistent handle reconnect */
+ if (tcon && tcon->need_reconnect) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ rc = smb2_reconnect(SMB2_ECHO, tcon);
+ spin_lock(&cifs_tcp_ses_lock);
+ }
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+ }
+
+ /* if no session, renegotiate failed above */
+ if (server->tcpStatus == CifsNeedNegotiate)
+ return -EIO;
+
rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
if (rc)
return rc;
len = simple_write_to_buffer(buffer->bin_buffer,
buffer->bin_buffer_size, ppos, buf, count);
- if (len > 0)
- *ppos += len;
out:
mutex_unlock(&buffer->mutex);
return len;
return 0;
file->f_pos = pos;
cprm->written += n;
+ cprm->pos += n;
nr -= n;
}
return 1;
if (dump_interrupted() ||
file->f_op->llseek(file, nr, SEEK_CUR) < 0)
return 0;
+ cprm->pos += nr;
return 1;
} else {
while (nr > PAGE_SIZE) {
int dump_align(struct coredump_params *cprm, int align)
{
- unsigned mod = cprm->file->f_pos & (align - 1);
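+ /* cprm->pos tracks the amount written even to a pipe, where f_pos is not maintained */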
+ unsigned mod = cprm->pos & (align - 1);
if (align & (align - 1))
return 0;
return mod ? dump_skip(cprm, align - mod) : 1;
dax.addr += first;
size = map_len - first;
}
- max = min(pos + size, end);
+ /*
+ * pos + size is one past the last offset for IO,
+ * so pos + size can overflow loff_t at extreme offsets.
+ * Cast to u64 to catch this and get the true minimum.
+ */
+ max = min_t(u64, pos + size, end);
}
if (iov_iter_rw(iter) == WRITE) {
}
EXPORT_SYMBOL(d_drop);
+static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
+{
+ struct dentry *next;
+ /*
+ * Inform d_walk() and shrink_dentry_list() that we are no longer
+ * attached to the dentry tree
+ */
+ dentry->d_flags |= DCACHE_DENTRY_KILLED;
+ if (unlikely(list_empty(&dentry->d_child)))
+ return;
+ __list_del_entry(&dentry->d_child);
+ /*
+ * Cursors can move around the list of children. While we'd been
+ * a normal list member, it didn't matter - ->d_child.next would've
+ * been updated. However, from now on it won't be, and things
+ * like d_walk() might end up with a nasty surprise.
+ * Normally d_walk() doesn't care about cursors moving around -
+ * ->d_lock on parent prevents that and since a cursor has no children
+ * of its own, we get through it without ever unlocking the parent.
+ * There is one exception, though - if we ascend from a child that
+ * gets killed as soon as we unlock it, the next sibling is found
+ * using the value left in its ->d_child.next. And if _that_
+ * pointed to a cursor, and cursor got moved (e.g. by lseek())
+ * before d_walk() regains parent->d_lock, we'll end up skipping
+ * everything the cursor had been moved past.
+ *
+ * Solution: make sure that the pointer left behind in ->d_child.next
+ * points to something that won't be moving around. I.e. skip the
+ * cursors.
+ */
+ while (dentry->d_child.next != &parent->d_subdirs) {
+ next = list_entry(dentry->d_child.next, struct dentry, d_child);
+ if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
+ break;
+ dentry->d_child.next = next->d_child.next;
+ }
+}
+
static void __dentry_kill(struct dentry *dentry)
{
struct dentry *parent = NULL;
}
/* if it was on the hash then remove it */
__d_drop(dentry);
- __list_del_entry(&dentry->d_child);
- /*
- * Inform d_walk() that we are no longer attached to the
- * dentry tree
- */
- dentry->d_flags |= DCACHE_DENTRY_KILLED;
+ dentry_unlist(dentry, parent);
if (parent)
spin_unlock(&parent->d_lock);
dentry_iput(dentry);
struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
next = tmp->next;
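+ /* readdir cursors are placeholders, not real children; skip them */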
+ if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
+ continue;
+
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
ret = enter(data, dentry);
struct dentry *dentry = __d_alloc(parent->d_sb, name);
if (!dentry)
return NULL;
-
+ dentry->d_flags |= DCACHE_RCUACCESS;
spin_lock(&parent->d_lock);
/*
* don't need child lock because it is not subject
}
EXPORT_SYMBOL(d_alloc);
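+/*
+ * Cursors are anonymous dentries used to remember a readdir position in
+ * the parent's d_subdirs list; DCACHE_DENTRY_CURSOR makes d_walk() and
+ * __dentry_kill() skip over them.
+ */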
+struct dentry *d_alloc_cursor(struct dentry *parent)
+{
+ struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
+ if (dentry) {
+ dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
+ dentry->d_parent = dget(parent);
+ }
+ return dentry;
+}
+
/**
* d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
* @sb: the superblock
{
BUG_ON(!d_unhashed(entry));
hlist_bl_lock(b);
- entry->d_flags |= DCACHE_RCUACCESS;
hlist_bl_add_head_rcu(&entry->d_hash, b);
hlist_bl_unlock(b);
}
rcu_read_unlock();
goto retry;
}
- rcu_read_unlock();
/*
* No changes for the parent since the beginning of d_lookup().
* Since all removals from the chain happen with hlist_bl_lock(),
continue;
if (dentry->d_parent != parent)
continue;
- if (d_unhashed(dentry))
- continue;
if (parent->d_flags & DCACHE_OP_COMPARE) {
int tlen = dentry->d_name.len;
const char *tname = dentry->d_name.name;
if (dentry_cmp(dentry, str, len))
continue;
}
- dget(dentry);
hlist_bl_unlock(b);
- /* somebody is doing lookup for it right now; wait for it */
+ /* now we can try to grab a reference */
+ if (!lockref_get_not_dead(&dentry->d_lockref)) {
+ rcu_read_unlock();
+ goto retry;
+ }
+
+ rcu_read_unlock();
+ /*
+ * somebody is likely to be still doing lookup for it;
+ * wait for them to finish
+ */
spin_lock(&dentry->d_lock);
d_wait_lookup(dentry);
/*
dput(new);
return dentry;
}
+ rcu_read_unlock();
/* we can't take ->d_lock here; it's OK, though. */
new->d_flags |= DCACHE_PAR_LOOKUP;
new->d_wait = wq;
/* ... and switch them in the tree */
if (IS_ROOT(dentry)) {
/* splicing a tree */
+ dentry->d_flags |= DCACHE_RCUACCESS;
dentry->d_parent = target->d_parent;
target->d_parent = target;
list_del_init(&target->d_child);
r = real_fops->open(inode, filp);
out:
- fops_put(real_fops);
debugfs_use_file_finish(srcu_idx);
return r;
}
if (real_fops->open) {
r = real_fops->open(inode, filp);
-
- if (filp->f_op != proxy_fops) {
+ if (r) {
+ replace_fops(filp, d_inode(dentry)->i_fop);
+ goto free_proxy;
+ } else if (filp->f_op != proxy_fops) {
/* No protection against file removal anymore. */
WARN(1, "debugfs file owner replaced proxy fops: %pd",
dentry);
static DEFINE_MUTEX(allocated_ptys_lock);
-static struct vfsmount *devpts_mnt;
-
struct pts_mount_opts {
int setuid;
int setgid;
kgid_t gid;
umode_t mode;
umode_t ptmxmode;
- int newinstance;
+ int reserve;
int max;
};
{Opt_uid, "uid=%u"},
{Opt_gid, "gid=%u"},
{Opt_mode, "mode=%o"},
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
{Opt_ptmxmode, "ptmxmode=%o"},
{Opt_newinstance, "newinstance"},
{Opt_max, "max=%d"},
-#endif
{Opt_err, NULL}
};
return sb->s_fs_info;
}
-static inline struct super_block *pts_sb_from_inode(struct inode *inode)
+struct pts_fs_info *devpts_acquire(struct file *filp)
{
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
- if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
- return inode->i_sb;
-#endif
- if (!devpts_mnt)
- return NULL;
- return devpts_mnt->mnt_sb;
+ struct pts_fs_info *result;
+ struct path path;
+ struct super_block *sb;
+ int err;
+
+ path = filp->f_path;
+ path_get(&path);
+
+ /* Has the devpts filesystem already been found? */
+ sb = path.mnt->mnt_sb;
+ if (sb->s_magic != DEVPTS_SUPER_MAGIC) {
+ /* Is a devpts filesystem at "pts" in the same directory? */
+ err = path_pts(&path);
+ if (err) {
+ result = ERR_PTR(err);
+ goto out;
+ }
+
+ /* Is the path the root of a devpts filesystem? */
+ result = ERR_PTR(-ENODEV);
+ sb = path.mnt->mnt_sb;
+ if ((sb->s_magic != DEVPTS_SUPER_MAGIC) ||
+ (path.mnt->mnt_root != sb->s_root))
+ goto out;
+ }
+
+ /*
+ * pty code needs to hold extra references in case of last /dev/tty close
+ */
+ atomic_inc(&sb->s_active);
+ result = DEVPTS_SB(sb);
+
+out:
+ path_put(&path);
+ return result;
+}
+
+void devpts_release(struct pts_fs_info *fsi)
+{
+ deactivate_super(fsi->sb);
}
#define PARSE_MOUNT 0
/*
* parse_mount_options():
* Set @opts to mount options specified in @data. If an option is not
- * specified in @data, set it to its default value. The exception is
- * 'newinstance' option which can only be set/cleared on a mount (i.e.
- * cannot be changed during remount).
+ * specified in @data, set it to its default value.
*
* Note: @data may be NULL (in which case all options are set to default).
*/
opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
opts->max = NR_UNIX98_PTY_MAX;
- /* newinstance makes sense only on initial mount */
+ /* Only allow instances mounted from the initial mount
+ * namespace to tap the reserve pool of ptys.
+ */
if (op == PARSE_MOUNT)
- opts->newinstance = 0;
+ opts->reserve =
+ (current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns);
while ((p = strsep(&data, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
return -EINVAL;
opts->mode = option & S_IALLUGO;
break;
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
case Opt_ptmxmode:
if (match_octal(&args[0], &option))
return -EINVAL;
opts->ptmxmode = option & S_IALLUGO;
break;
case Opt_newinstance:
- /* newinstance makes sense only on initial mount */
- if (op == PARSE_MOUNT)
- opts->newinstance = 1;
break;
case Opt_max:
if (match_int(&args[0], &option) ||
return -EINVAL;
opts->max = option;
break;
-#endif
default:
pr_err("called with bogus options\n");
return -EINVAL;
return 0;
}
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
static int mknod_ptmx(struct super_block *sb)
{
int mode;
inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode;
}
}
-#else
-static inline void update_ptmx_mode(struct pts_fs_info *fsi)
-{
- return;
-}
-#endif
static int devpts_remount(struct super_block *sb, int *flags, char *data)
{
seq_printf(seq, ",gid=%u",
from_kgid_munged(&init_user_ns, opts->gid));
seq_printf(seq, ",mode=%03o", opts->mode);
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode);
if (opts->max < NR_UNIX98_PTY_MAX)
seq_printf(seq, ",max=%d", opts->max);
-#endif
return 0;
}
return -ENOMEM;
}
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
-static int compare_init_pts_sb(struct super_block *s, void *p)
-{
- if (devpts_mnt)
- return devpts_mnt->mnt_sb == s;
- return 0;
-}
-
/*
* devpts_mount()
*
- * If the '-o newinstance' mount option was specified, mount a new
- * (private) instance of devpts. PTYs created in this instance are
- * independent of the PTYs in other devpts instances.
- *
- * If the '-o newinstance' option was not specified, mount/remount the
- * initial kernel mount of devpts. This type of mount gives the
- * legacy, single-instance semantics.
- *
- * The 'newinstance' option is needed to support multiple namespace
- * semantics in devpts while preserving backward compatibility of the
- * current 'single-namespace' semantics. i.e all mounts of devpts
- * without the 'newinstance' mount option should bind to the initial
- * kernel mount, like mount_single().
- *
- * Mounts with 'newinstance' option create a new, private namespace.
- *
- * NOTE:
- *
- * For single-mount semantics, devpts cannot use mount_single(),
- * because mount_single()/sget() find and use the super-block from
- * the most recent mount of devpts. But that recent mount may be a
- * 'newinstance' mount and mount_single() would pick the newinstance
- * super-block instead of the initial super-block.
+ * Mount a new (private) instance of devpts. PTYs created in this
+ * instance are independent of the PTYs in other devpts instances.
*/
static struct dentry *devpts_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
if (error)
return ERR_PTR(error);
- /* Require newinstance for all user namespace mounts to ensure
- * the mount options are not changed.
- */
- if ((current_user_ns() != &init_user_ns) && !opts.newinstance)
- return ERR_PTR(-EINVAL);
-
- if (opts.newinstance)
- s = sget(fs_type, NULL, set_anon_super, flags, NULL);
- else
- s = sget(fs_type, compare_init_pts_sb, set_anon_super, flags,
- NULL);
-
+ s = sget(fs_type, NULL, set_anon_super, flags, NULL);
if (IS_ERR(s))
return ERR_CAST(s);
return ERR_PTR(error);
}
-#else
-/*
- * This supports only the legacy single-instance semantics (no
- * multiple-instance semantics)
- */
-static struct dentry *devpts_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- return mount_single(fs_type, flags, data, devpts_fill_super);
-}
-#endif
-
static void devpts_kill_sb(struct super_block *sb)
{
struct pts_fs_info *fsi = DEVPTS_SB(sb);
.name = "devpts",
.mount = devpts_mount,
.kill_sb = devpts_kill_sb,
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
.fs_flags = FS_USERNS_MOUNT | FS_USERNS_DEV_MOUNT,
-#endif
};
/*
int index;
int ida_ret;
- if (!fsi)
- return -ENODEV;
-
retry:
if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
return -ENOMEM;
mutex_lock(&allocated_ptys_lock);
- if (pty_count >= pty_limit -
- (fsi->mount_opts.newinstance ? pty_reserve : 0)) {
+ if (pty_count >= (pty_limit -
+ (fsi->mount_opts.reserve ? 0 : pty_reserve))) {
mutex_unlock(&allocated_ptys_lock);
return -ENOSPC;
}
mutex_unlock(&allocated_ptys_lock);
}
-/*
- * pty code needs to hold extra references in case of last /dev/tty close
- */
-struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
-{
- struct super_block *sb;
- struct pts_fs_info *fsi;
-
- sb = pts_sb_from_inode(ptmx_inode);
- if (!sb)
- return NULL;
- fsi = DEVPTS_SB(sb);
- if (!fsi)
- return NULL;
-
- atomic_inc(&sb->s_active);
- return fsi;
-}
-
-void devpts_put_ref(struct pts_fs_info *fsi)
-{
- deactivate_super(fsi->sb);
-}
-
/**
* devpts_pty_new -- create a new inode in /dev/pts/
* @fsi: devpts instance that will host the new pty inode
struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
{
struct dentry *dentry;
- struct super_block *sb;
+ struct super_block *sb = fsi->sb;
struct inode *inode;
struct dentry *root;
struct pts_mount_opts *opts;
char s[12];
- if (!fsi)
- return ERR_PTR(-ENODEV);
-
- sb = fsi->sb;
root = sb->s_root;
opts = &fsi->mount_opts;
static int __init init_devpts_fs(void)
{
int err = register_filesystem(&devpts_fs_type);
- struct ctl_table_header *table;
-
if (!err) {
- struct vfsmount *mnt;
-
- table = register_sysctl_table(pty_root_table);
- mnt = kern_mount(&devpts_fs_type);
- if (IS_ERR(mnt)) {
- err = PTR_ERR(mnt);
- unregister_filesystem(&devpts_fs_type);
- unregister_sysctl_table(table);
- } else {
- devpts_mnt = mnt;
- }
+ register_sysctl_table(pty_root_table);
}
return err;
}
* ecryptfs_to_hex
* @dst: Buffer to take hex character representation of contents of
* src; must be at least of size (src_size * 2)
- * @src: Buffer to be converted to a hex string respresentation
+ * @src: Buffer to be converted to a hex string representation
* @src_size: number of bytes to convert
*/
void ecryptfs_to_hex(char *dst, char *src, size_t src_size)
* ecryptfs_from_hex
* @dst: Buffer to take the bytes from src hex; must be at least of
* size (src_size / 2)
- * @src: Buffer to be converted from a hex string respresentation to raw value
+ * @src: Buffer to be converted from a hex string representation to raw value
* @dst_size: size of dst buffer, or number of hex characters pairs to convert
*/
void ecryptfs_from_hex(char *dst, char *src, int dst_size)
};
/* Add support for additional ciphers by adding elements here. The
- * cipher_code is whatever OpenPGP applicatoins use to identify the
+ * cipher_code is whatever OpenPGP applications use to identify the
* ciphers. List in order of probability. */
static struct ecryptfs_cipher_code_str_map_elem
ecryptfs_cipher_code_str_map[] = {
*
* Common entry point for reading file metadata. From here, we could
* retrieve the header information from the header region of the file,
- * the xattr region of the file, or some other repostory that is
+ * the xattr region of the file, or some other repository that is
* stored separately from the file itself. The current implementation
* supports retrieving the metadata information from the file contents
* and from the xattr region.
return rc;
}
+static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct file *lower_file = ecryptfs_file_to_lower(file);
+ /*
+ * Don't allow mmap on top of file systems that don't support it
+ * natively. If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
+ * allows recursive mounting, this will need to be extended.
+ */
+ if (!lower_file->f_op->mmap)
+ return -ENODEV;
+ return generic_file_mmap(file, vma);
+}
+
/**
* ecryptfs_open
- * @inode: inode speciying file to open
+ * @inode: inode specifying file to open
* @file: Structure to return filled in
*
* Opens the file specified by inode.
/**
* ecryptfs_dir_open
- * @inode: inode speciying file to open
+ * @inode: inode specifying file to open
* @file: Structure to return filled in
*
* Opens the file specified by inode.
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
#endif
- .mmap = generic_file_mmap,
+ .mmap = ecryptfs_mmap,
.open = ecryptfs_open,
.flush = ecryptfs_flush,
.release = ecryptfs_release,
struct ecryptfs_cache_info *info;
info = &ecryptfs_cache_infos[i];
- if (*(info->cache))
- kmem_cache_destroy(*(info->cache));
+ kmem_cache_destroy(*(info->cache));
}
}
goto out_free;
}
inode->i_state |= I_WB_SWITCH;
+ __iget(inode);
spin_unlock(&inode->i_lock);
- ihold(inode);
isw->inode = inode;
atomic_inc(&isw_nr_in_flight);
put_page(results[i]);
}
+ wake_up_bit(&cookie->flags, 0);
+
_leave("");
}
struct dentry *newent;
bool outarg_valid = true;
+ fuse_lock_inode(dir);
err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
&outarg, &inode);
+ fuse_unlock_inode(dir);
if (err == -ENOENT) {
outarg_valid = false;
err = 0;
struct fuse_conn *fc = get_fuse_conn(dir);
struct dentry *res = NULL;
- if (d_unhashed(entry)) {
+ if (d_in_lookup(entry)) {
res = fuse_lookup(dir, entry, 0);
if (IS_ERR(res))
return PTR_ERR(res);
fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
FUSE_READDIR);
}
+ fuse_lock_inode(inode);
fuse_request_send(fc, req);
+ fuse_unlock_inode(inode);
nbytes = req->out.args[0].size;
err = req->out.h.error;
fuse_put_request(fc, req);
/** Miscellaneous bits describing inode state */
unsigned long state;
+
+ /** Lock for serializing lookup and readdir for backward compatibility */
+ struct mutex mutex;
};
/** FUSE inode state bits */
/** write-back cache policy (default is write-through) */
unsigned writeback_cache:1;
+ /** allow parallel lookups and readdir (default is serialized) */
+ unsigned parallel_dirops:1;
+
/*
* The following bitfields are only for optimization purposes
* and hence races in setting them will not cause malfunction
void fuse_set_initialized(struct fuse_conn *fc);
+void fuse_unlock_inode(struct inode *inode);
+void fuse_lock_inode(struct inode *inode);
+
#endif /* _FS_FUSE_I_H */
INIT_LIST_HEAD(&fi->queued_writes);
INIT_LIST_HEAD(&fi->writepages);
init_waitqueue_head(&fi->page_waitq);
+ mutex_init(&fi->mutex);
fi->forget = fuse_alloc_forget();
if (!fi->forget) {
kmem_cache_free(fuse_inode_cachep, inode);
struct fuse_inode *fi = get_fuse_inode(inode);
BUG_ON(!list_empty(&fi->write_files));
BUG_ON(!list_empty(&fi->queued_writes));
+ mutex_destroy(&fi->mutex);
kfree(fi->forget);
call_rcu(&inode->i_rcu, fuse_i_callback);
}
return 0;
}
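+/*
+ * Lookup and readdir on the same directory are serialized through
+ * fi->mutex unless the server negotiated FUSE_PARALLEL_DIROPS at
+ * INIT time.
+ */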
+void fuse_lock_inode(struct inode *inode)
+{
+ if (!get_fuse_conn(inode)->parallel_dirops)
+ mutex_lock(&get_fuse_inode(inode)->mutex);
+}
+
+void fuse_unlock_inode(struct inode *inode)
+{
+ if (!get_fuse_conn(inode)->parallel_dirops)
+ mutex_unlock(&get_fuse_inode(inode)->mutex);
+}
+
static void fuse_umount_begin(struct super_block *sb)
{
fuse_abort_conn(get_fuse_conn_super(sb));
fc->async_dio = 1;
if (arg->flags & FUSE_WRITEBACK_CACHE)
fc->writeback_cache = 1;
+ if (arg->flags & FUSE_PARALLEL_DIROPS)
+ fc->parallel_dirops = 1;
if (arg->time_gran && arg->time_gran <= 1000000000)
fc->sb->s_time_gran = arg->time_gran;
} else {
FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
- FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
+ FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
+ FUSE_PARALLEL_DIROPS;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
struct dentry *d;
bool excl = !!(flags & O_EXCL);
- if (!d_unhashed(dentry))
+ if (!d_in_lookup(dentry))
goto skip_lookup;
d = __gfs2_lookup(dir, dentry, file, opened);
extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
extern int d_set_mounted(struct dentry *dentry);
extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc);
+extern struct dentry *d_alloc_cursor(struct dentry *);
/*
* read_write.c
BUG_ON(size & (size-1)); /* Must be a power of 2 */
- flags |= __GFP_REPEAT;
- if (size == PAGE_SIZE)
- ptr = (void *)__get_free_pages(flags, 0);
- else if (size > PAGE_SIZE) {
- int order = get_order(size);
-
- if (order < 3)
- ptr = (void *)__get_free_pages(flags, order);
- else
- ptr = vmalloc(size);
- } else
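+ /* power-of-two sizes below PAGE_SIZE come from per-size slab caches;
+ * anything bigger is allocated as whole pages */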
+ if (size < PAGE_SIZE)
ptr = kmem_cache_alloc(get_slab(size), flags);
+ else
+ ptr = (void *)__get_free_pages(flags, get_order(size));
/* Check alignment; SLUB has gotten this wrong in the past,
* and this can lead to user data corruption! */
void jbd2_free(void *ptr, size_t size)
{
- if (size == PAGE_SIZE) {
- free_pages((unsigned long)ptr, 0);
- return;
- }
- if (size > PAGE_SIZE) {
- int order = get_order(size);
-
- if (order < 3)
- free_pages((unsigned long)ptr, order);
- else
- vfree(ptr);
- return;
- }
- kmem_cache_free(get_slab(size), ptr);
+ if (size < PAGE_SIZE)
+ kmem_cache_free(get_slab(size), ptr);
+ else
+ free_pages((unsigned long)ptr, get_order(size));
};
/*
int dcache_dir_open(struct inode *inode, struct file *file)
{
- static struct qstr cursor_name = QSTR_INIT(".", 1);
-
- file->private_data = d_alloc(file->f_path.dentry, &cursor_name);
+ file->private_data = d_alloc_cursor(file->f_path.dentry);
return file->private_data ? 0 : -ENOMEM;
}
}
EXPORT_SYMBOL(dcache_dir_close);
+/* parent is locked at least shared */
+static struct dentry *next_positive(struct dentry *parent,
+ struct list_head *from,
+ int count)
+{
+ unsigned *seq = &parent->d_inode->i_dir_seq, n;
+ struct dentry *res;
+ struct list_head *p;
+ bool skipped;
+ int i;
+
+retry:
+ i = count;
+ skipped = false;
+ n = smp_load_acquire(seq) & ~1;
+ res = NULL;
+ rcu_read_lock();
+ for (p = from->next; p != &parent->d_subdirs; p = p->next) {
+ struct dentry *d = list_entry(p, struct dentry, d_child);
+ if (!simple_positive(d)) {
+ skipped = true;
+ } else if (!--i) {
+ res = d;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (skipped) {
+ smp_rmb();
+ if (unlikely(*seq != n))
+ goto retry;
+ }
+ return res;
+}
+
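+/*
+ * Cursor moves bracket the update by making ->i_dir_seq odd and then
+ * releasing the next even value.  next_positive() samples the even value
+ * up front and, if it had to skip dead (negative or unhashed) entries,
+ * re-checks it afterwards so that a racing cursor move forces a retry.
+ */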
+static void move_cursor(struct dentry *cursor, struct list_head *after)
+{
+ struct dentry *parent = cursor->d_parent;
+ unsigned n, *seq = &parent->d_inode->i_dir_seq;
+ spin_lock(&parent->d_lock);
+ for (;;) {
+ n = *seq;
+ if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
+ break;
+ cpu_relax();
+ }
+ __list_del(cursor->d_child.prev, cursor->d_child.next);
+ if (after)
+ list_add(&cursor->d_child, after);
+ else
+ list_add_tail(&cursor->d_child, &parent->d_subdirs);
+ smp_store_release(seq, n + 2);
+ spin_unlock(&parent->d_lock);
+}
+
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
{
struct dentry *dentry = file->f_path.dentry;
if (offset != file->f_pos) {
file->f_pos = offset;
if (file->f_pos >= 2) {
- struct list_head *p;
struct dentry *cursor = file->private_data;
+ struct dentry *to;
loff_t n = file->f_pos - 2;
- spin_lock(&dentry->d_lock);
- /* d_lock not required for cursor */
- list_del(&cursor->d_child);
- p = dentry->d_subdirs.next;
- while (n && p != &dentry->d_subdirs) {
- struct dentry *next;
- next = list_entry(p, struct dentry, d_child);
- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
- if (simple_positive(next))
- n--;
- spin_unlock(&next->d_lock);
- p = p->next;
- }
- list_add_tail(&cursor->d_child, p);
- spin_unlock(&dentry->d_lock);
+ inode_lock_shared(dentry->d_inode);
+ to = next_positive(dentry, &dentry->d_subdirs, n);
+ move_cursor(cursor, to ? &to->d_child : NULL);
+ inode_unlock_shared(dentry->d_inode);
}
}
return offset;
{
struct dentry *dentry = file->f_path.dentry;
struct dentry *cursor = file->private_data;
- struct list_head *p, *q = &cursor->d_child;
+ struct list_head *p = &cursor->d_child;
+ struct dentry *next;
+ bool moved = false;
if (!dir_emit_dots(file, ctx))
return 0;
- spin_lock(&dentry->d_lock);
- if (ctx->pos == 2)
- list_move(q, &dentry->d_subdirs);
- for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
- struct dentry *next = list_entry(p, struct dentry, d_child);
- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
- if (!simple_positive(next)) {
- spin_unlock(&next->d_lock);
- continue;
- }
-
- spin_unlock(&next->d_lock);
- spin_unlock(&dentry->d_lock);
+ if (ctx->pos == 2)
+ p = &dentry->d_subdirs;
+ while ((next = next_positive(dentry, p, 1)) != NULL) {
if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
d_inode(next)->i_ino, dt_type(d_inode(next))))
- return 0;
- spin_lock(&dentry->d_lock);
- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
- /* next is still alive */
- list_move(q, p);
- spin_unlock(&next->d_lock);
- p = q;
+ break;
+ moved = true;
+ p = &next->d_child;
ctx->pos++;
}
- spin_unlock(&dentry->d_lock);
+ if (moved)
+ move_cursor(cursor, p);
return 0;
}
EXPORT_SYMBOL(dcache_readdir);
};
#endif
-static void lockd_svc_exit_thread(void)
+static void lockd_unregister_notifiers(void)
{
unregister_inetaddr_notifier(&lockd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
+}
+
+static void lockd_svc_exit_thread(void)
+{
+ lockd_unregister_notifiers();
svc_exit_thread(nlmsvc_rqst);
}
* Note: svc_serv structures have an initial use count of 1,
* so we exit through here on both success and failure.
*/
-err_net:
+err_put:
svc_destroy(serv);
err_create:
mutex_unlock(&nlmsvc_mutex);
err_start:
lockd_down_net(serv, net);
- goto err_net;
+err_net:
+ lockd_unregister_notifiers();
+ goto err_put;
}
EXPORT_SYMBOL_GPL(lockd_up);
{
struct file_lock *fl, *my_fl = NULL, *lease;
struct dentry *dentry = filp->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct file_lock_context *ctx;
bool is_deleg = (*flp)->fl_flags & FL_DELEG;
int error;
}
}
+static int path_parent_directory(struct path *path)
+{
+ struct dentry *old = path->dentry;
+ /* rare case of legitimate dget_parent()... */
+ path->dentry = dget_parent(path->dentry);
+ dput(old);
+ if (unlikely(!path_connected(path)))
+ return -ENOENT;
+ return 0;
+}
+
static int follow_dotdot(struct nameidata *nd)
{
while(1) {
- struct dentry *old = nd->path.dentry;
-
if (nd->path.dentry == nd->root.dentry &&
nd->path.mnt == nd->root.mnt) {
break;
}
if (nd->path.dentry != nd->path.mnt->mnt_root) {
- /* rare case of legitimate dget_parent()... */
- nd->path.dentry = dget_parent(nd->path.dentry);
- dput(old);
- if (unlikely(!path_connected(&nd->path)))
- return -ENOENT;
+ int ret = path_parent_directory(&nd->path);
+ if (ret)
+ return ret;
break;
}
if (!follow_up(&nd->path))
}
EXPORT_SYMBOL(lookup_one_len_unlocked);
+#ifdef CONFIG_UNIX98_PTYS
+int path_pts(struct path *path)
+{
+ /* Find something mounted on "pts" in the same directory as
+ * the input path.
+ */
+ struct dentry *child, *parent;
+ struct qstr this;
+ int ret;
+
+ ret = path_parent_directory(path);
+ if (ret)
+ return ret;
+
+ parent = path->dentry;
+ this.name = "pts";
+ this.len = 3;
+ child = d_hash_and_lookup(parent, &this);
+ if (!child)
+ return -ENOENT;
+
+ path->dentry = child;
+ dput(parent);
+ follow_mount(path);
+ return 0;
+}
+#endif
+
int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
struct path *path, int *empty)
{
}
if (*opened & FILE_CREATED)
fsnotify_create(dir, dentry);
- path->dentry = dentry;
- path->mnt = nd->path.mnt;
- return 1;
+ if (unlikely(d_is_negative(dentry))) {
+ error = -ENOENT;
+ } else {
+ path->dentry = dentry;
+ path->mnt = nd->path.mnt;
+ return 1;
+ }
}
}
dput(dentry);
int acc_mode = op->acc_mode;
unsigned seq;
struct inode *inode;
- struct path save_parent = { .dentry = NULL, .mnt = NULL };
struct path path;
- bool retried = false;
int error;
nd->flags &= ~LOOKUP_PARENT;
return -EISDIR;
}
-retry_lookup:
if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
error = mnt_want_write(nd->path.mnt);
if (!error)
got_write = false;
}
+ error = follow_managed(&path, nd);
+ if (unlikely(error < 0))
+ return error;
+
if (unlikely(d_is_negative(path.dentry))) {
path_to_nameidata(&path, nd);
return -ENOENT;
return -EEXIST;
}
- error = follow_managed(&path, nd);
- if (unlikely(error < 0))
- return error;
-
seq = 0; /* out of RCU mode, so the value doesn't matter */
inode = d_backing_inode(path.dentry);
finish_lookup:
if (unlikely(error))
return error;
- if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
- path_to_nameidata(&path, nd);
- } else {
- save_parent.dentry = nd->path.dentry;
- save_parent.mnt = mntget(path.mnt);
- nd->path.dentry = path.dentry;
-
- }
+ path_to_nameidata(&path, nd);
nd->inode = inode;
nd->seq = seq;
/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
finish_open:
error = complete_walk(nd);
- if (error) {
- path_put(&save_parent);
+ if (error)
return error;
- }
audit_inode(nd->name, nd->path.dentry, 0);
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
error = vfs_open(&nd->path, file, current_cred());
- if (!error) {
- *opened |= FILE_OPENED;
- } else {
- if (error == -EOPENSTALE)
- goto stale_open;
+ if (error)
goto out;
- }
+ *opened |= FILE_OPENED;
opened:
error = open_check_o_direct(file);
if (!error)
}
if (got_write)
mnt_drop_write(nd->path.mnt);
- path_put(&save_parent);
return error;
-
-stale_open:
- /* If no saved parent or already retried then can't retry */
- if (!save_parent.dentry || retried)
- goto out;
-
- BUG_ON(save_parent.dentry != dir);
- path_put(&nd->path);
- nd->path = save_parent;
- nd->inode = dir->d_inode;
- save_parent.mnt = NULL;
- save_parent.dentry = NULL;
- if (got_write) {
- mnt_drop_write(nd->path.mnt);
- got_write = false;
- }
- retried = true;
- goto retry_lookup;
}
static int do_tmpfile(struct nameidata *nd, unsigned flags,
goto out_unlock;
lock_mount_hash();
+ event++;
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
}
if (type->fs_flags & FS_USERNS_VISIBLE) {
- if (!fs_fully_visible(type, &mnt_flags))
+ if (!fs_fully_visible(type, &mnt_flags)) {
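+ /* drop the fs-type reference before failing, or it leaks */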
+ put_filesystem(type);
return -EPERM;
+ }
}
}
if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
+ /* Don't miss readonly hidden in the superblock flags */
+ if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
+ mnt_flags |= MNT_LOCK_READONLY;
+
/* Verify the mount flags are equal to or more permissive
* than the proposed new mount.
*/
list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
struct inode *inode = child->mnt_mountpoint->d_inode;
/* Only worry about locked mounts */
- if (!(mnt_flags & MNT_LOCKED))
+ if (!(child->mnt.mnt_flags & MNT_LOCKED))
continue;
/* Is the directory permanently empty? */
if (!is_empty_dir_inode(inode))
static
int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
{
+ struct inode *inode;
struct nfs_inode *nfsi;
if (d_really_is_negative(dentry))
return 0;
- nfsi = NFS_I(d_inode(dentry));
+ inode = d_inode(dentry);
+ if (is_bad_inode(inode) || NFS_STALE(inode))
+ return 0;
+
+ nfsi = NFS_I(inode);
if (entry->fattr->fileid == nfsi->fileid)
return 1;
if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0)
struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
{
struct dentry *res;
- struct dentry *parent;
struct inode *inode = NULL;
struct nfs_fh *fhandle = NULL;
struct nfs_fattr *fattr = NULL;
if (IS_ERR(label))
goto out;
- parent = dentry->d_parent;
/* Protect against concurrent sillydeletes */
trace_nfs_lookup_enter(dir, dentry, flags);
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
struct file *file, unsigned open_flags,
umode_t mode, int *opened)
{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
struct nfs_open_context *ctx;
struct dentry *res;
struct iattr attr = { .ia_valid = ATTR_OPEN };
struct inode *inode;
unsigned int lookup_flags = 0;
+ bool switched = false;
int err;
/* Expect a negative dentry */
/* NFS only supports OPEN on regular files */
if ((open_flags & O_DIRECTORY)) {
- if (!d_unhashed(dentry)) {
+ if (!d_in_lookup(dentry)) {
/*
* Hashed negative dentry with O_DIRECTORY: dentry was
* revalidated and is fine, no need to perform lookup
attr.ia_size = 0;
}
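+ /*
+ * A plain open of a hashed negative dentry: switch to an in-lookup
+ * dentry so that concurrent opens of the same name serialize on
+ * d_alloc_parallel().
+ */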
+ if (!(open_flags & O_CREAT) && !d_in_lookup(dentry)) {
+ d_drop(dentry);
+ switched = true;
+ dentry = d_alloc_parallel(dentry->d_parent,
+ &dentry->d_name, &wq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ if (unlikely(!d_in_lookup(dentry)))
+ return finish_no_open(file, dentry);
+ }
+
ctx = create_nfs_open_context(dentry, open_flags);
err = PTR_ERR(ctx);
if (IS_ERR(ctx))
err = PTR_ERR(inode);
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
put_nfs_open_context(ctx);
+ d_drop(dentry);
switch (err) {
case -ENOENT:
- d_drop(dentry);
d_add(dentry, NULL);
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
break;
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
put_nfs_open_context(ctx);
out:
+ if (unlikely(switched)) {
+ d_lookup_done(dentry);
+ dput(dentry);
+ }
return err;
no_open:
res = nfs_lookup(dir, dentry, lookup_flags);
- err = PTR_ERR(res);
+ if (switched) {
+ d_lookup_done(dentry);
+ if (!res)
+ res = dentry;
+ else
+ dput(dentry);
+ }
if (IS_ERR(res))
- goto out;
-
+ return PTR_ERR(res);
return finish_no_open(file, res);
}
EXPORT_SYMBOL_GPL(nfs_atomic_open);
result = wait_for_completion_killable(&dreq->completion);
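+ /* report bytes actually transferred in preference to a later error */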
+ if (!result) {
+ result = dreq->count;
+ WARN_ON_ONCE(dreq->count < 0);
+ }
if (!result)
result = dreq->error;
- if (!result)
- result = dreq->count;
out:
return (ssize_t) result;
if (dreq->iocb) {
long res = (long) dreq->error;
- if (!res)
+ if (dreq->count != 0) {
res = (long) dreq->count;
+ WARN_ON_ONCE(dreq->count < 0);
+ }
dreq->iocb->ki_complete(dreq->iocb, res, 0);
}
struct nfs_fattr *fattr = desc->fattr;
set_nfs_fileid(inode, fattr->fileid);
+ inode->i_mode = fattr->mode;
nfs_copy_fh(NFS_FH(inode), desc->fh);
return 0;
}
call_close |= is_wronly;
else if (is_wronly)
calldata->arg.fmode |= FMODE_WRITE;
+ if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
+ call_close |= is_rdwr;
} else if (is_rdwr)
calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
- if (calldata->arg.fmode == 0)
- call_close |= is_rdwr;
-
if (!nfs4_valid_open_stateid(state))
call_close = 0;
spin_unlock(&state->owner->so_lock);
break;
}
lo = NFS_I(inode)->layout;
- if (lo && nfs4_stateid_match(&lgp->args.stateid,
- &lo->plh_stateid)) {
+ if (lo && !test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) &&
+ nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
LIST_HEAD(head);
/*
pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&head);
+ status = -EAGAIN;
+ goto out;
} else
spin_unlock(&inode->i_lock);
- status = -EAGAIN;
- goto out;
}
status = nfs4_handle_exception(server, status, exception);
.flags = RPC_TASK_ASYNC,
};
struct pnfs_layout_segment *lseg = NULL;
- struct nfs4_exception exception = { .timeout = *timeout };
+ struct nfs4_exception exception = {
+ .inode = inode,
+ .timeout = *timeout,
+ };
int status = 0;
dprintk("--> %s\n", __func__);
}
spin_unlock(&state->state_lock);
}
- nfs4_put_open_state(state);
clear_bit(NFS_STATE_RECLAIM_NOGRACE,
&state->flags);
+ nfs4_put_open_state(state);
spin_lock(&sp->so_lock);
goto restart;
}
list_del_init(&lseg->pls_list);
/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
atomic_dec(&lo->plh_refcount);
- if (list_empty(&lo->plh_segs))
+ if (list_empty(&lo->plh_segs)) {
+ set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
+ }
rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}
INIT_LIST_HEAD(&lo->plh_bulk_destroy);
lo->plh_inode = ino;
lo->plh_lc_cred = get_rpccred(ctx->cred);
+ lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
return lo;
}
pnfs_find_alloc_layout(struct inode *ino,
struct nfs_open_context *ctx,
gfp_t gfp_flags)
+ __releases(&ino->i_lock)
+ __acquires(&ino->i_lock)
{
struct nfs_inode *nfsi = NFS_I(ino);
struct pnfs_layout_hdr *new = NULL;
* stateid, or it has been invalidated, then we must use the open
* stateid.
*/
- if (lo->plh_stateid.seqid == 0 ||
- test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
+ if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
/*
* The first layoutget for the file. Need to serialize per
}
/* Helper function for pnfs_generic_commit_pagelist to catch an empty
- * page list. This can happen when two commits race. */
+ * page list. This can happen when two commits race.
+ *
+ * This must be called instead of nfs_init_commit - call one or the other, but
+ * not both!
+ */
static bool
pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
struct nfs_commit_data *data,
if (list_empty(pages)) {
if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
wake_up_atomic_t(&cinfo->mds->rpcs_out);
- nfs_commitdata_release(data);
+ /* don't call nfs_commitdata_release - it tries to put
+ * the open_context which is not acquired until nfs_init_commit
+ * which has not been called on @data */
+ WARN_ON_ONCE(data->context);
+ nfs_commit_free(data);
return true;
}
nfs_list_remove_request(new);
nfs_readpage_release(new);
error = desc->pgio->pg_error;
- goto out_unlock;
+ goto out;
}
return 0;
out_error:
error = PTR_ERR(new);
-out_unlock:
unlock_page(page);
+out:
return error;
}
return error;
}
-#define NFSD_MDS_PR_KEY 0x0100000000000000
+#define NFSD_MDS_PR_KEY 0x0100000000000000ULL
/*
* We use the client ID as a unique key for the reservations.
goto out;
inode = d_inode(fh->fh_dentry);
- if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
- error = -EOPNOTSUPP;
- goto out_errno;
- }
error = fh_want_write(fh);
if (error)
goto out_errno;
- error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
+ fh_lock(fh);
+
+ error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
if (error)
- goto out_drop_write;
- error = inode->i_op->set_acl(inode, argp->acl_default,
- ACL_TYPE_DEFAULT);
+ goto out_drop_lock;
+ error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
if (error)
- goto out_drop_write;
+ goto out_drop_lock;
+
+ fh_unlock(fh);
fh_drop_write(fh);
posix_acl_release(argp->acl_access);
posix_acl_release(argp->acl_default);
return nfserr;
-out_drop_write:
+out_drop_lock:
+ fh_unlock(fh);
fh_drop_write(fh);
out_errno:
nfserr = nfserrno(error);
goto out;
inode = d_inode(fh->fh_dentry);
- if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
- error = -EOPNOTSUPP;
- goto out_errno;
- }
error = fh_want_write(fh);
if (error)
goto out_errno;
- error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
+ fh_lock(fh);
+
+ error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
if (error)
- goto out_drop_write;
- error = inode->i_op->set_acl(inode, argp->acl_default,
- ACL_TYPE_DEFAULT);
+ goto out_drop_lock;
+ error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
-out_drop_write:
+out_drop_lock:
+ fh_unlock(fh);
fh_drop_write(fh);
out_errno:
nfserr = nfserrno(error);
dentry = fhp->fh_dentry;
inode = d_inode(dentry);
- if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
- return nfserr_attrnotsupp;
-
if (S_ISDIR(inode->i_mode))
flags = NFS4_ACL_DIR;
if (host_error < 0)
goto out_nfserr;
- host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
+ fh_lock(fhp);
+
+ host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
if (host_error < 0)
- goto out_release;
+ goto out_drop_lock;
if (S_ISDIR(inode->i_mode)) {
- host_error = inode->i_op->set_acl(inode, dpacl,
- ACL_TYPE_DEFAULT);
+ host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
}
-out_release:
+out_drop_lock:
+ fh_unlock(fhp);
+
posix_acl_release(pacl);
posix_acl_release(dpacl);
out_nfserr:
}
}
-static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
-{
- struct rpc_xprt *xprt;
-
- if (args->protocol != XPRT_TRANSPORT_BC_TCP)
- return rpc_create(args);
-
- xprt = args->bc_xprt->xpt_bc_xprt;
- if (xprt) {
- xprt_get(xprt);
- return rpc_create_xprt(args, xprt);
- }
-
- return rpc_create(args);
-}
-
static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
{
int maxtime = max_cb_time(clp->net);
args.authflavor = ses->se_cb_sec.flavor;
}
/* Create RPC client */
- client = create_backchannel_client(&args);
+ client = rpc_create(&args);
if (IS_ERR(client)) {
dprintk("NFSD: couldn't create callback client: %ld\n",
PTR_ERR(client));
}
static struct nfs4_ol_stateid *
-init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
- struct nfsd4_open *open)
+init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{
struct nfs4_openowner *oo = open->op_openowner;
struct nfs4_ol_stateid *retstp = NULL;
+ struct nfs4_ol_stateid *stp;
+
+ stp = open->op_stp;
+ /* Init and lock the mutex before taking the spinlocks: a mutex may
+ * sleep and must not be acquired under them */
+ mutex_init(&stp->st_mutex);
+ mutex_lock(&stp->st_mutex);
spin_lock(&oo->oo_owner.so_client->cl_lock);
spin_lock(&fp->fi_lock);
retstp = nfsd4_find_existing_open(fp, open);
if (retstp)
goto out_unlock;
+
+ open->op_stp = NULL;
atomic_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_OPEN_STID;
INIT_LIST_HEAD(&stp->st_locks);
stp->st_access_bmap = 0;
stp->st_deny_bmap = 0;
stp->st_openstp = NULL;
- init_rwsem(&stp->st_rwsem);
list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
list_add(&stp->st_perfile, &fp->fi_stateids);
out_unlock:
spin_unlock(&fp->fi_lock);
spin_unlock(&oo->oo_owner.so_client->cl_lock);
- return retstp;
+ if (retstp) {
+ mutex_lock(&retstp->st_mutex);
+ /* To keep mutex tracking happy */
+ mutex_unlock(&stp->st_mutex);
+ stp = retstp;
+ }
+ return stp;
}
/*
struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
struct nfs4_file *fp = NULL;
struct nfs4_ol_stateid *stp = NULL;
- struct nfs4_ol_stateid *swapstp = NULL;
struct nfs4_delegation *dp = NULL;
__be32 status;
*/
if (stp) {
/* Stateid was found, this is an OPEN upgrade */
- down_read(&stp->st_rwsem);
+ mutex_lock(&stp->st_mutex);
status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
if (status) {
- up_read(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
goto out;
}
} else {
- stp = open->op_stp;
- open->op_stp = NULL;
- swapstp = init_open_stateid(stp, fp, open);
- if (swapstp) {
- nfs4_put_stid(&stp->st_stid);
- stp = swapstp;
- down_read(&stp->st_rwsem);
+ /* stp is returned locked. */
+ stp = init_open_stateid(fp, open);
+ /* See if we lost the race to some other thread */
+ if (stp->st_access_bmap != 0) {
status = nfs4_upgrade_open(rqstp, fp, current_fh,
stp, open);
if (status) {
- up_read(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
goto out;
}
goto upgrade_out;
}
- down_read(&stp->st_rwsem);
status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
if (status) {
- up_read(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
release_open_stateid(stp);
goto out;
}
}
upgrade_out:
nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
- up_read(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
if (nfsd4_has_session(&resp->cstate)) {
if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
* revoked delegations are kept only for free_stateid.
*/
return nfserr_bad_stateid;
- down_write(&stp->st_rwsem);
+ mutex_lock(&stp->st_mutex);
status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
if (status == nfs_ok)
status = nfs4_check_fh(current_fh, &stp->st_stid);
if (status != nfs_ok)
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
return status;
}
return status;
oo = openowner(stp->st_stateowner);
if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
nfs4_put_stid(&stp->st_stid);
return nfserr_bad_stateid;
}
oo = openowner(stp->st_stateowner);
status = nfserr_bad_stateid;
if (oo->oo_flags & NFS4_OO_CONFIRMED) {
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
goto put_stateid;
}
oo->oo_flags |= NFS4_OO_CONFIRMED;
nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
status = nfs_ok;
put_stateid:
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
nfs4_put_stid(&stp->st_stid);
out:
nfsd4_bump_seqid(cstate, status);
if (status)
goto out;
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
nfsd4_close_open_stateid(stp);
stp->st_access_bmap = 0;
stp->st_deny_bmap = open_stp->st_deny_bmap;
stp->st_openstp = open_stp;
- init_rwsem(&stp->st_rwsem);
+ mutex_init(&stp->st_mutex);
list_add(&stp->st_locks, &open_stp->st_locks);
list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
spin_lock(&fp->fi_lock);
&open_stp, nn);
if (status)
goto out;
- up_write(&open_stp->st_rwsem);
+ mutex_unlock(&open_stp->st_mutex);
open_sop = openowner(open_stp->st_stateowner);
status = nfserr_bad_stateid;
if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
status = lookup_or_create_lock_state(cstate, open_stp, lock,
&lock_stp, &new);
if (status == nfs_ok)
- down_write(&lock_stp->st_rwsem);
+ mutex_lock(&lock_stp->st_mutex);
} else {
status = nfs4_preprocess_seqid_op(cstate,
lock->lk_old_lock_seqid,
seqid_mutating_err(ntohl(status)))
lock_sop->lo_owner.so_seqid++;
- up_write(&lock_stp->st_rwsem);
+ mutex_unlock(&lock_stp->st_mutex);
/*
* If this is a new, never-before-used stateid, and we are
fput:
fput(filp);
put_stateid:
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
nfs4_put_stid(&stp->st_stid);
out:
nfsd4_bump_seqid(cstate, status);
unsigned char st_access_bmap;
unsigned char st_deny_bmap;
struct nfs4_ol_stateid *st_openstp;
- struct rw_semaphore st_rwsem;
+ struct mutex st_mutex;
};
static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
return 0;
bytes = le16_to_cpu(sbp->s_bytes);
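+ /* the claimed size must at least cover the checksum field itself */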
- if (bytes > BLOCK_SIZE)
+ if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
return 0;
crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
sumoff);
ccflags-y := -Ifs/ocfs2
-ccflags-y += -DCATCH_BH_JBD_RACES
-
obj-$(CONFIG_OCFS2_FS) += \
ocfs2.o \
ocfs2_stackglue.o
lock_buffer(bh);
if (buffer_jbd(bh)) {
+#ifdef CATCH_BH_JBD_RACES
mlog(ML_ERROR,
"block %llu had the JBD bit set "
"while I was in lock_buffer!",
(unsigned long long)bh->b_blocknr);
BUG();
+#else
+ unlock_buffer(bh);
+ continue;
+#endif
}
clear_buffer_uptodate(bh);
err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
} else {
const struct cred *old_cred;
+ struct cred *override_cred;
old_cred = ovl_override_creds(dentry->d_sb);
- err = ovl_create_over_whiteout(dentry, inode, &stat, link,
- hardlink);
+ err = -ENOMEM;
+ override_cred = prepare_creds();
+ if (override_cred) {
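+ /* keep the caller's fsuid/fsgid on top of the mounter's
+ * credentials so the new upper file gets the right owner */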
+ override_cred->fsuid = old_cred->fsuid;
+ override_cred->fsgid = old_cred->fsgid;
+ put_cred(override_creds(override_cred));
+ put_cred(override_cred);
+ err = ovl_create_over_whiteout(dentry, inode, &stat,
+ link, hardlink);
+ }
revert_creds(old_cred);
}
struct dentry *upper;
struct dentry *opaquedir = NULL;
int err;
+ int flags = 0;
if (WARN_ON(!workdir))
return -EROFS;
if (err)
goto out_dput;
- whiteout = ovl_whiteout(workdir, dentry);
- err = PTR_ERR(whiteout);
- if (IS_ERR(whiteout))
+ upper = lookup_one_len(dentry->d_name.name, upperdir,
+ dentry->d_name.len);
+ err = PTR_ERR(upper);
+ if (IS_ERR(upper))
goto out_unlock;
- upper = ovl_dentry_upper(dentry);
- if (!upper) {
- upper = lookup_one_len(dentry->d_name.name, upperdir,
- dentry->d_name.len);
- err = PTR_ERR(upper);
- if (IS_ERR(upper))
- goto kill_whiteout;
-
- err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
- dput(upper);
- if (err)
- goto kill_whiteout;
- } else {
- int flags = 0;
+ err = -ESTALE;
+ if ((opaquedir && upper != opaquedir) ||
+ (!opaquedir && ovl_dentry_upper(dentry) &&
+ upper != ovl_dentry_upper(dentry))) {
+ goto out_dput_upper;
+ }
- if (opaquedir)
- upper = opaquedir;
- err = -ESTALE;
- if (upper->d_parent != upperdir)
- goto kill_whiteout;
+ whiteout = ovl_whiteout(workdir, dentry);
+ err = PTR_ERR(whiteout);
+ if (IS_ERR(whiteout))
+ goto out_dput_upper;
- if (is_dir)
- flags |= RENAME_EXCHANGE;
+ if (d_is_dir(upper))
+ flags = RENAME_EXCHANGE;
- err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
- if (err)
- goto kill_whiteout;
+ err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
+ if (err)
+ goto kill_whiteout;
+ if (flags)
+ ovl_cleanup(wdir, upper);
- if (is_dir)
- ovl_cleanup(wdir, upper);
- }
ovl_dentry_version_inc(dentry->d_parent);
out_d_drop:
d_drop(dentry);
dput(whiteout);
+out_dput_upper:
+ dput(upper);
out_unlock:
unlock_rename(workdir, upperdir);
out_dput:
if (err)
goto out;
+ if (attr->ia_valid & ATTR_SIZE) {
+ struct inode *realinode = d_inode(ovl_dentry_real(dentry));
+
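+ /* i_writecount < 0 means the real file is mapped for execute
+ * (deny_write_access() held): refuse to truncate it */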
+ err = -ETXTBSY;
+ if (atomic_read(&realinode->i_writecount) < 0)
+ goto out_drop_write;
+ }
+
err = ovl_copy_up(dentry);
if (!err) {
+ struct inode *winode = NULL;
+
upperdentry = ovl_dentry_upper(dentry);
+ if (attr->ia_valid & ATTR_SIZE) {
+ winode = d_inode(upperdentry);
+ err = get_write_access(winode);
+ if (err)
+ goto out_drop_write;
+ }
+
+ if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
+ attr->ia_valid &= ~ATTR_MODE;
+
inode_lock(upperdentry->d_inode);
err = notify_change(upperdentry, attr, NULL);
if (!err)
ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
inode_unlock(upperdentry->d_inode);
+
+ if (winode)
+ put_write_access(winode);
}
+out_drop_write:
ovl_drop_write(dentry);
out:
return err;
err = vfs_getattr(&realpath, &stat);
if (err)
- return err;
+ goto out_dput;
+ err = -ESTALE;
if ((stat.mode ^ inode->i_mode) & S_IFMT)
- return -ESTALE;
+ goto out_dput;
inode->i_mode = stat.mode;
inode->i_uid = stat.uid;
inode->i_gid = stat.gid;
- return generic_permission(inode, mask);
+ err = generic_permission(inode, mask);
+ goto out_dput;
}
/* Careful in RCU walk mode */
return err;
}
-static bool ovl_need_xattr_filter(struct dentry *dentry,
- enum ovl_path_type type)
-{
- if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER)
- return S_ISDIR(dentry->d_inode->i_mode);
- else
- return false;
-}
-
ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
const char *name, void *value, size_t size)
{
- struct path realpath;
- enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+ struct dentry *realdentry = ovl_dentry_real(dentry);
- if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
+ if (ovl_is_private_xattr(name))
return -ENODATA;
- return vfs_getxattr(realpath.dentry, name, value, size);
+ return vfs_getxattr(realdentry, name, value, size);
}
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
{
- struct path realpath;
- enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+ struct dentry *realdentry = ovl_dentry_real(dentry);
ssize_t res;
int off;
- res = vfs_listxattr(realpath.dentry, list, size);
+ res = vfs_listxattr(realdentry, list, size);
if (res <= 0 || size == 0)
return res;
- if (!ovl_need_xattr_filter(dentry, type))
- return res;
-
/* filter out private xattrs */
for (off = 0; off < res;) {
char *s = list + off;
goto out;
err = -ENODATA;
- if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
+ if (ovl_is_private_xattr(name))
goto out_drop_write;
if (!OVL_TYPE_UPPER(type)) {
if (!inode)
return NULL;
- mode &= S_IFMT;
-
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_flags |= S_NOATIME | S_NOCMTIME;
+ mode &= S_IFMT;
switch (mode) {
case S_IFDIR:
inode->i_private = oe;
{
to->i_uid = from->i_uid;
to->i_gid = from->i_gid;
+ to->i_mode = from->i_mode;
}
/* dir.c */
if (err < 0)
goto out_put_workdir;
- if (!err) {
- pr_err("overlayfs: upper fs needs to support d_type.\n");
- err = -EINVAL;
- goto out_put_workdir;
- }
+ /*
+ * This configuration was previously allowed and we don't want
+ * to break existing users over a kernel upgrade, so warn
+ * instead of erroring out.
+ */
+ if (!err)
+ pr_warn("overlayfs: upper fs needs to support d_type.\n");
}
}
return error;
}
-static int
-posix_acl_xattr_set(const struct xattr_handler *handler,
- struct dentry *unused, struct inode *inode,
- const char *name, const void *value,
- size_t size, int flags)
+int
+set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
{
- struct posix_acl *acl = NULL;
- int ret;
-
if (!IS_POSIXACL(inode))
return -EOPNOTSUPP;
if (!inode->i_op->set_acl)
return -EOPNOTSUPP;
- if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
- return value ? -EACCES : 0;
+ if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
+ return acl ? -EACCES : 0;
if (!inode_owner_or_capable(inode))
return -EPERM;
+ if (acl) {
+ int ret = posix_acl_valid(acl);
+ if (ret)
+ return ret;
+ }
+ return inode->i_op->set_acl(inode, acl, type);
+}
+EXPORT_SYMBOL(set_posix_acl);
+
+static int
+posix_acl_xattr_set(const struct xattr_handler *handler,
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
+{
+ struct posix_acl *acl = NULL;
+ int ret;
+
if (value) {
acl = posix_acl_from_xattr(&init_user_ns, value, size);
if (IS_ERR(acl))
return PTR_ERR(acl);
-
- if (acl) {
- ret = posix_acl_valid(acl);
- if (ret)
- goto out;
- }
}
-
- ret = inode->i_op->set_acl(inode, acl, handler->flags);
-out:
+ ret = set_posix_acl(inode, handler->flags, acl);
posix_acl_release(acl);
return ret;
}
if (IS_ERR(sb))
return ERR_CAST(sb);
+ /*
+ * procfs isn't actually a stacking filesystem; however, there is
+ * too much magic going on inside it to permit stacking things on
+ * top of it
+ */
+ sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
+
if (!proc_parse_options(options, ns)) {
deactivate_locked_super(sb);
return ERR_PTR(-EINVAL);
unsigned long safe_mask = 0;
unsigned int commit_max_age = (unsigned int)-1;
struct reiserfs_journal *journal = SB_JOURNAL(s);
- char *new_opts = kstrdup(arg, GFP_KERNEL);
+ char *new_opts;
int err;
char *qf_names[REISERFS_MAXQUOTAS];
unsigned int qfmt = 0;
int i;
#endif
+ new_opts = kstrdup(arg, GFP_KERNEL);
+ if (arg && !new_opts)
+ return -ENOMEM;
+
sync_filesystem(s);
reiserfs_write_lock(s);
}
out_ok_unlocked:
- replace_mount_options(s, new_opts);
+ if (new_opts)
+ replace_mount_options(s, new_opts);
return 0;
out_err_unlock:
#include "ubifs.h"
#include <linux/mount.h>
#include <linux/slab.h>
+#include <linux/migrate.h>
static int read_block(struct inode *inode, void *addr, unsigned int block,
struct ubifs_data_node *dn)
return ret;
}
+#ifdef CONFIG_MIGRATION
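+/*
+ * UBIFS uses PagePrivate as a page-state marker, which generic page
+ * migration would not preserve; carry it over to the new page by hand.
+ */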
+static int ubifs_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc;
+
+ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
+ if (rc != MIGRATEPAGE_SUCCESS)
+ return rc;
+
+ if (PagePrivate(page)) {
+ ClearPagePrivate(page);
+ SetPagePrivate(newpage);
+ }
+
+ migrate_page_copy(newpage, page);
+ return MIGRATEPAGE_SUCCESS;
+}
+#endif
+
static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
/*
.write_end = ubifs_write_end,
.invalidatepage = ubifs_invalidatepage,
.set_page_dirty = ubifs_set_page_dirty,
+#ifdef CONFIG_MIGRATION
+ .migratepage = ubifs_migrate_page,
+#endif
.releasepage = ubifs_releasepage,
};
map = &UDF_SB(sb)->s_partmaps[partition];
/* map to sparable/physical partition desc */
phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
- map->s_partition_num, ext_offset + offset);
+ map->s_type_specific.s_metadata.s_phys_partition_ref,
+ ext_offset + offset);
}
brelse(epos.bh);
mdata = &map->s_type_specific.s_metadata;
inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;
- /* We shouldn't mount such media... */
- BUG_ON(!inode);
+ if (!inode)
+ return 0xFFFFFFFF;
+
retblk = udf_try_read_meta(inode, block, partition, offset);
if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) {
udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n");
if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) {
mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
- mdata->s_mirror_file_loc, map->s_partition_num);
+ mdata->s_mirror_file_loc,
+ mdata->s_phys_partition_ref);
+ if (IS_ERR(mdata->s_mirror_fe))
+ mdata->s_mirror_fe = NULL;
mdata->s_flags |= MF_MIRROR_FE_LOADED;
}
}
struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
- u32 meta_file_loc, u32 partition_num)
+ u32 meta_file_loc, u32 partition_ref)
{
struct kernel_lb_addr addr;
struct inode *metadata_fe;
addr.logicalBlockNum = meta_file_loc;
- addr.partitionReferenceNum = partition_num;
+ addr.partitionReferenceNum = partition_ref;
metadata_fe = udf_iget_special(sb, &addr);
return metadata_fe;
}
-static int udf_load_metadata_files(struct super_block *sb, int partition)
+static int udf_load_metadata_files(struct super_block *sb, int partition,
+ int type1_index)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map;
map = &sbi->s_partmaps[partition];
mdata = &map->s_type_specific.s_metadata;
+ mdata->s_phys_partition_ref = type1_index;
/* metadata address */
udf_debug("Metadata file location: block = %d part = %d\n",
- mdata->s_meta_file_loc, map->s_partition_num);
+ mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
- map->s_partition_num);
+ mdata->s_phys_partition_ref);
if (IS_ERR(fe)) {
/* mirror file entry */
udf_debug("Mirror metadata file location: block = %d part = %d\n",
- mdata->s_mirror_file_loc, map->s_partition_num);
+ mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
- map->s_partition_num);
+ mdata->s_phys_partition_ref);
if (IS_ERR(fe)) {
udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
*/
if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
addr.logicalBlockNum = mdata->s_bitmap_file_loc;
- addr.partitionReferenceNum = map->s_partition_num;
+ addr.partitionReferenceNum = mdata->s_phys_partition_ref;
udf_debug("Bitmap file location: block = %d part = %d\n",
addr.logicalBlockNum, addr.partitionReferenceNum);
p = (struct partitionDesc *)bh->b_data;
partitionNumber = le16_to_cpu(p->partitionNumber);
- /* First scan for TYPE1, SPARABLE and METADATA partitions */
+ /* First scan for TYPE1 and SPARABLE partitions */
for (i = 0; i < sbi->s_partitions; i++) {
map = &sbi->s_partmaps[i];
udf_debug("Searching map: (%d == %d)\n",
goto out_bh;
if (map->s_partition_type == UDF_METADATA_MAP25) {
- ret = udf_load_metadata_files(sb, i);
+ ret = udf_load_metadata_files(sb, i, type1_idx);
if (ret < 0) {
udf_err(sb, "error loading MetaData partition map %d\n",
i);
__u32 s_bitmap_file_loc;
__u32 s_alloc_unit_size;
__u16 s_align_unit_size;
+ /*
+ * Partition Reference Number of the associated physical / sparable
+ * partition
+ */
+ __u16 s_phys_partition_ref;
int s_flags;
struct inode *s_metadata_fe;
struct inode *s_mirror_fe;
goto out_put_tmp_file;
}
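+ /* both descriptors must refer to XFS files, or the XFS_I()
+ * conversions below would be bogus */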
+ if (f.file->f_op != &xfs_file_operations ||
+ tmp.file->f_op != &xfs_file_operations) {
+ error = -EINVAL;
+ goto out_put_tmp_file;
+ }
+
ip = XFS_I(file_inode(f.file));
tip = XFS_I(file_inode(tmp.file));
container_of(fwnode, struct acpi_data_node, fwnode) : NULL;
}
+static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
+ const char *name)
+{
+ return is_acpi_data_node(fwnode) ?
+ (!strcmp(to_acpi_data_node(fwnode)->name, name)) : false;
+}
+
static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
{
return &adev->fwnode;
/* ACPI PCI Interrupt Link (pci_link.c) */
+int acpi_irq_penalty_init(void);
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
int *polarity, char **name);
int acpi_pci_link_free_irq(acpi_handle handle);
/*
* Optionally support group module level code.
*/
-ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE);
/*
* Optionally use 32-bit FADT addresses if and when there is a conflict
*/
extern bool acpi_video_handles_brightness_key_presses(void);
extern int acpi_video_get_levels(struct acpi_device *device,
- struct acpi_video_device_brightness **dev_br);
+ struct acpi_video_device_brightness **dev_br,
+ int *pmax_level);
#else
static inline int acpi_video_register(void) { return 0; }
static inline void acpi_video_unregister(void) { return; }
return false;
}
static inline int acpi_video_get_levels(struct acpi_device *device,
- struct acpi_video_device_brightness **dev_br)
+ struct acpi_video_device_brightness **dev_br,
+ int *pmax_level)
{
return -ENODEV;
}
#include <asm-generic/qspinlock_types.h>
+/**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
/**
* queued_spin_is_locked - is the spinlock locked?
* @lock: Pointer to queued spinlock structure
* Return: 1 if it is locked, 0 otherwise
*/
+#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
/*
- * queued_spin_lock_slowpath() can ACQUIRE the lock before
- * issuing the unordered store that sets _Q_LOCKED_VAL.
- *
- * See both smp_cond_acquire() sites for more detail.
- *
- * This however means that in code like:
- *
- * spin_lock(A) spin_lock(B)
- * spin_unlock_wait(B) spin_is_locked(A)
- * do_something() do_something()
- *
- * Both CPUs can end up running do_something() because the store
- * setting _Q_LOCKED_VAL will pass through the loads in
- * spin_unlock_wait() and/or spin_is_locked().
+ * See queued_spin_unlock_wait().
*
- * Avoid this by issuing a full memory barrier between the spin_lock()
- * and the loads in spin_unlock_wait() and spin_is_locked().
- *
- * Note that regular mutual exclusion doesn't care about this
- * delayed store.
+ * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+ * isn't immediately observable.
*/
- smp_mb();
- return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+ return atomic_read(&lock->val);
}
+#endif
/**
* queued_spin_value_unlocked - is the spinlock structure unlocked?
}
#endif
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
- /* See queued_spin_is_locked() */
- smp_mb();
- while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
- cpu_relax();
-}
-
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
#define INIT_TEXT \
*(.init.text) \
+ *(.text.startup) \
MEM_DISCARD(init.text)
#define EXIT_DATA \
*(.exit.data) \
+ *(.fini_array) \
+ *(.dtors) \
MEM_DISCARD(exit.data) \
MEM_DISCARD(exit.rodata)
#define EXIT_TEXT \
*(.exit.text) \
+ *(.text.exit) \
MEM_DISCARD(exit.text)
#define EXIT_CALL \
INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
+ INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
#define INTEL_KBL_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \
- INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
+ INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
#define INTEL_KBL_GT4_IDS(info) \
- INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \
- INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
- INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
- INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */
+ INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
*/
extern int ttm_bo_wait(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait);
+
+/**
+ * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
+ *
+ * @placement: Return immediately if buffer is busy.
+ * @mem: The struct ttm_mem_reg indicating the region where the bo resides
+ * @new_flags: Describes compatible placement found
+ *
+ * Returns true if the placement is compatible
+ */
+extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags);
+
/**
* ttm_bo_validate
*
#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H
-#ifdef CONFIG_KVM_ARM_PMU
-
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
+#ifdef CONFIG_KVM_ARM_PMU
+
struct kvm_pmc {
u8 idx; /* index into the pmu->pmc array */
struct perf_event *perf_event;
return NULL;
}
+static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
+ const char *name)
+{
+ return false;
+}
+
static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
{
return NULL;
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <uapi/linux/audit.h>
-#include <linux/tty.h>
#define AUDIT_INO_UNSET ((unsigned long)-1)
#define AUDIT_DEV_UNSET ((dev_t)-1)
return tsk->sessionid;
}
-static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
-{
- struct tty_struct *tty = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&tsk->sighand->siglock, flags);
- if (tsk->signal)
- tty = tty_kref_get(tsk->signal->tty);
- spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
- return tty;
-}
-
-static inline void audit_put_tty(struct tty_struct *tty)
-{
- tty_kref_put(tty);
-}
-
extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
extern void __audit_bprm(struct linux_binprm *bprm);
{
return -1;
}
-static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
-{
- return NULL;
-}
-static inline void audit_put_tty(struct tty_struct *tty)
-{ }
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{ }
static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
#define BCMA_CORE_DEFAULT 0xFFF
#define BCMA_MAX_NR_CORES 16
+#define BCMA_CORE_SIZE 0x1000
/* Chip IDs of PCIe devices */
#define BCMA_CHIP_ID_BCM4313 0x4313
unsigned long limit;
unsigned long mm_flags;
loff_t written;
+ loff_t pos;
};
/*
BPF_WRITE = 2
};
+/* types of values stored in eBPF registers */
+enum bpf_reg_type {
+ NOT_INIT = 0, /* nothing was written into register */
+ UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
+ PTR_TO_CTX, /* reg points to bpf_context */
+ CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
+ PTR_TO_MAP_VALUE, /* reg points to map element value */
+ PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
+ FRAME_PTR, /* reg == frame_pointer */
+ PTR_TO_STACK, /* reg == frame_pointer + imm */
+ CONST_IMM, /* constant integer value */
+
+ /* PTR_TO_PACKET represents:
+ * skb->data
+ * skb->data + imm
+ * skb->data + (u16) var
+ * skb->data + (u16) var + imm
+ * if (range > 0) then [ptr, ptr + range - off) is safe to access
+ * if (id > 0) means that some 'var' was added
+ * if (off > 0) means that 'imm' was added
+ */
+ PTR_TO_PACKET,
+ PTR_TO_PACKET_END, /* skb->data + headlen */
+};
+
struct bpf_prog;
struct bpf_verifier_ops {
/* return true if 'size' wide access at offset 'off' within bpf_context
* with 'type' (read or write) is allowed
*/
- bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
+ bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
+ enum bpf_reg_type *reg_type);
u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
+
+static inline void bpf_prog_put_rcu(struct bpf_prog *prog)
+{
+}
#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
struct workqueue_struct *notify_wq;
};
+static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)
+{
+ return osdc->osdmap->flags & flag;
+}
+
extern int ceph_osdc_setup(void);
extern void ceph_osdc_cleanup(void);
return !ceph_osd_is_up(map, osd);
}
-static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
-{
- return map && (map->flags & flag);
-}
-
extern char *ceph_osdmap_state_str(char *str, int len, int state);
extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);
#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
-#define CLK_IS_ROOT BIT(4) /* Deprecated: Don't use */
+ /* unused */
#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */
#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
extern int cpuidle_play_dead(void);
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
+static inline struct cpuidle_device *cpuidle_get_device(void)
+{
+	return __this_cpu_read(cpuidle_devices);
+}
#else
static inline void disable_cpuidle(void) { }
static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
static inline int cpuidle_play_dead(void) {return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
struct cpuidle_device *dev) {return NULL; }
+static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif
#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
#define DCACHE_OP_REAL 0x08000000
#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
+#define DCACHE_DENTRY_CURSOR 0x20000000
extern seqlock_t rename_lock;
return inode;
}
+/**
+ * d_real_inode - Return the real inode
+ * @dentry: The dentry to query
+ *
+ * If dentry is on an union/overlay, then return the underlying, real inode.
+ * Otherwise return d_inode().
+ */
+static inline struct inode *d_real_inode(struct dentry *dentry)
+{
+ return d_backing_inode(d_real(dentry));
+}
+
#endif /* __LINUX_DCACHE_H */
#include <linux/errno.h>
-struct pts_fs_info;
-
#ifdef CONFIG_UNIX98_PTYS
-/* Look up a pts fs info and get a ref to it */
-struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
-void devpts_put_ref(struct pts_fs_info *);
+struct pts_fs_info;
+
+struct pts_fs_info *devpts_acquire(struct file *);
+void devpts_release(struct pts_fs_info *);
int devpts_new_index(struct pts_fs_info *);
void devpts_kill_index(struct pts_fs_info *, int);
* @file: file pointer used for sharing buffers across, and for refcounting.
* @attachments: list of dma_buf_attachment that denotes all devices attached.
* @ops: dma_buf_ops associated with this buffer object.
+ * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap
+ * @vmapping_counter: used internally to refcnt the vmaps
+ * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
* @exp_name: name of the exporter; useful for debugging.
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object.
* @resv: reservation object linked to this dma-buf
+ * @poll: for userspace poll support
+ * @cb_excl: for userspace poll support
+ * @cb_shared: for userspace poll support
*/
struct dma_buf {
size_t size;
struct file *file;
struct list_head attachments;
const struct dma_buf_ops *ops;
- /* mutex to serialize list manipulation, attach/detach and vmap/unmap */
struct mutex lock;
unsigned vmapping_counter;
void *vmap_ptr;
/**
* helper macro for exporters; zeros and fills in most common values
+ *
+ * @name: export-info name
*/
-#define DEFINE_DMA_BUF_EXPORT_INFO(a) \
- struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \
+#define DEFINE_DMA_BUF_EXPORT_INFO(name) \
+ struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
.owner = THIS_MODULE }
/**
/* Iterate through an efi_memory_map */
#define for_each_efi_memory_desc_in_map(m, md) \
for ((md) = (m)->map; \
- (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
+ ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
(md) = (void *)(md) + (m)->desc_size)
/**
* @timestamp: Timestamp when the fence was signaled.
* @status: Optional, only valid if < 0, must be set before calling
* fence_signal, indicates that the fence has completed with an error.
+ * @child_list: list of children fences
+ * @active_list: list of active fences
*
* the flags member must be manipulated and read using the appropriate
* atomic ops (bit_*), so taking the spinlock will not be needed most
}
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
-int sk_filter(struct sock *sk, struct sk_buff *skb);
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
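+/* sk_filter() preserves the old behaviour: the attached filter may trim
+ * the skb, but never below a single byte. */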
+static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
+{
+ return sk_filter_trim_cap(sk, skb, 1);
+}
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);
/* check the consistency between the backing cache and the FS-Cache
* cookie */
- bool (*check_consistency)(struct fscache_operation *op);
+ int (*check_consistency)(struct fscache_operation *op);
/* store the updated auxiliary data on an object */
void (*update_object)(struct fscache_object *object);
void deferred_split_huge_page(struct page *page);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze);
+ unsigned long address, bool freeze, struct page *page);
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
if (pmd_trans_huge(*____pmd) \
|| pmd_devmap(*____pmd)) \
__split_huge_pmd(__vma, __pmd, __address, \
- false); \
+ false, NULL); \
} while (0)
* @get_irq_data_ready: Function to get the IRQ used for data ready signal.
* @tf: Transfer function structure used by I/O operations.
* @tb: Transfer buffers and mutex used by I/O operations.
+ * @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
+ * @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
*/
struct st_sensor_data {
struct device *dev;
const struct st_sensor_transfer_function *tf;
struct st_sensor_transfer_buffer tb;
+
+ bool hw_irq_trigger;
+ s64 hw_timestamp;
};
#ifdef CONFIG_IIO_BUFFER
const struct iio_trigger_ops *trigger_ops);
void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
-
+int st_sensors_validate_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev);
#else
static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops)
{
return;
}
+#define st_sensors_validate_device NULL
#endif
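Defining the fallback as NULL rather than as a stub lets drivers assign the callback unconditionally; when triggers are compiled out the ops slot is simply left empty. A hedged sketch (other ops fields trimmed):

	static const struct iio_trigger_ops my_trigger_ops = {
		.validate_device = st_sensors_validate_device,
	};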
int st_sensors_init_sensor(struct iio_dev *indio_dev,
int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
+void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
+
+int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
+ struct inet_diag_msg *r, int ext,
+ struct user_namespace *user_ns);
+
extern int inet_diag_register(const struct inet_diag_handler *handler);
extern void inet_diag_unregister(const struct inet_diag_handler *handler);
#endif /* _INET_DIAG_H_ */
#define INIT_TASK(tsk) \
{ \
.state = 0, \
- .stack = &init_thread_info, \
+ .stack = init_stack, \
.usage = ATOMIC_INIT(2), \
.flags = PF_KTHREAD, \
.prio = MAX_PRIO-20, \
#define ICC_SGI1R_AFFINITY_1_SHIFT 16
#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
#define ICC_SGI1R_SGI_ID_SHIFT 24
-#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT)
+#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
#define ICC_SGI1R_AFFINITY_2_SHIFT 32
-#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
#define ICC_SGI1R_AFFINITY_3_SHIFT 48
-#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
#include <asm/arch_gicv3.h>
#define __LINUX_ISA_H
#include <linux/device.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
struct isa_driver {
#define to_isa_driver(x) container_of((x), struct isa_driver, driver)
-#ifdef CONFIG_ISA
+#ifdef CONFIG_ISA_BUS_API
int isa_register_driver(struct isa_driver *, unsigned int);
void isa_unregister_driver(struct isa_driver *);
#else
static inline int isa_register_driver(struct isa_driver *d, unsigned int i)
{
- return 0;
+ return -ENODEV;
}
static inline void isa_unregister_driver(struct isa_driver *d)
#include <linux/atomic.h>
+#ifdef HAVE_JUMP_LABEL
+
static inline int static_key_count(struct static_key *key)
{
- return atomic_read(&key->enabled);
+ /*
+ * -1 means the first static_key_slow_inc() is in progress.
+ * static_key_enabled() must return true, so return 1 here.
+ */
+ int n = atomic_read(&key->enabled);
+ return n >= 0 ? n : 1;
}
-#ifdef HAVE_JUMP_LABEL
-
#define JUMP_TYPE_FALSE 0UL
#define JUMP_TYPE_TRUE 1UL
#define JUMP_TYPE_MASK 1UL
#else /* !HAVE_JUMP_LABEL */
+static inline int static_key_count(struct static_key *key)
+{
+ return atomic_read(&key->enabled);
+}
+
static __always_inline void jump_label_init(void)
{
static_key_initialized = true;
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
void kasan_kfree_large(const void *ptr);
-void kasan_kfree(void *ptr);
+void kasan_poison_kfree(void *ptr);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
gfp_t flags);
void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
bool kasan_slab_free(struct kmem_cache *s, void *object);
-void kasan_poison_slab_free(struct kmem_cache *s, void *object);
struct kasan_cache {
int alloc_meta_offset;
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
+size_t ksize(const void *);
+static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+
#else /* CONFIG_KASAN */
static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
static inline void kasan_kfree_large(const void *ptr) {}
-static inline void kasan_kfree(void *ptr) {}
+static inline void kasan_poison_kfree(void *ptr) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size, gfp_t flags) {}
static inline void kasan_krealloc(const void *object, size_t new_size,
{
return false;
}
-static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {}
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+static inline void kasan_unpoison_slab(const void *ptr) { }
+
#endif /* CONFIG_KASAN */
#endif /* LINUX_KASAN_H */
#define LED_UNREGISTERING (1 << 1)
/* Upper 16 bits reflect control information */
#define LED_CORE_SUSPENDRESUME (1 << 16)
-#define LED_BLINK_ONESHOT (1 << 17)
-#define LED_BLINK_ONESHOT_STOP (1 << 18)
-#define LED_BLINK_INVERT (1 << 19)
-#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 20)
-#define LED_BLINK_DISABLE (1 << 21)
-#define LED_SYSFS_DISABLE (1 << 22)
-#define LED_DEV_CAP_FLASH (1 << 23)
-#define LED_HW_PLUGGABLE (1 << 24)
-#define LED_PANIC_INDICATOR (1 << 25)
+#define LED_BLINK_SW (1 << 17)
+#define LED_BLINK_ONESHOT (1 << 18)
+#define LED_BLINK_ONESHOT_STOP (1 << 19)
+#define LED_BLINK_INVERT (1 << 20)
+#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21)
+#define LED_BLINK_DISABLE (1 << 22)
+#define LED_SYSFS_DISABLE (1 << 23)
+#define LED_DEV_CAP_FLASH (1 << 24)
+#define LED_HW_PLUGGABLE (1 << 25)
+#define LED_PANIC_INDICATOR (1 << 26)
/* Set LED brightness level
* Must not sleep. Use brightness_set_blocking for drivers
* and if both are zero then a sensible default should be chosen.
* In that case, and whenever it cannot match the values specified
* exactly, the call should adjust the timings to those actually used.
- * Deactivate blinking again when the brightness is set to a fixed
- * value via the brightness_set() callback.
+ * Deactivate blinking again when the brightness is set to LED_OFF
+ * via the brightness_set() callback.
*/
int (*blink_set)(struct led_classdev *led_cdev,
unsigned long *delay_on,
#define MEM_CGROUP_ID_SHIFT 16
#define MEM_CGROUP_ID_MAX USHRT_MAX
+struct mem_cgroup_id {
+ int id;
+ atomic_t ref;
+};
+
struct mem_cgroup_stat_cpu {
long count[MEMCG_NR_STAT];
unsigned long events[MEMCG_NR_EVENTS];
struct mem_cgroup {
struct cgroup_subsys_state css;
+ /* Private memcg ID. Used to ID objects that outlive the cgroup */
+ struct mem_cgroup_id id;
+
/* Accounted resources */
struct page_counter memory;
struct page_counter swap;
if (mem_cgroup_disabled())
return 0;
- return memcg->css.id;
-}
-
-/**
- * mem_cgroup_from_id - look up a memcg from an id
- * @id: the id to look up
- *
- * Caller must hold rcu_read_lock() and use css_tryget() as necessary.
- */
-static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
-{
- struct cgroup_subsys_state *css;
-
- css = css_from_id(id, &memory_cgrp_subsys);
- return mem_cgroup_from_css(css);
+ return memcg->id.id;
}
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
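The lookup moved out of line, but callers still follow the RCU discipline the removed kernel-doc described: hold rcu_read_lock() and take a reference before using the result. A hedged sketch:

	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg && !css_tryget_online(&memcg->css))
		memcg = NULL;
	rcu_read_unlock();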
/**
* parent_mem_cgroup - find the accounting parent of a memcg
#define _WM_ARIZONA_CORE_H
#include <linux/interrupt.h>
+#include <linux/notifier.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/arizona/pdata.h>
uint16_t dac_comp_coeff;
uint8_t dac_comp_enabled;
struct mutex dac_comp_lock;
+
+ struct blocking_notifier_head notifier;
};
+static inline int arizona_call_notifiers(struct arizona *arizona,
+ unsigned long event,
+ void *data)
+{
+ return blocking_notifier_call_chain(&arizona->notifier, event, data);
+}
+
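On the listener side a client registers a notifier block on the same chain; a hedged sketch where the MFD-child probe pattern and my_arizona_event are illustrative:

	static int my_arizona_event(struct notifier_block *nb,
				    unsigned long event, void *data)
	{
		/* react to the reported event */
		return NOTIFY_OK;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_arizona_event,
	};

	static int my_client_probe(struct platform_device *pdev)
	{
		struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);

		return blocking_notifier_chain_register(&arizona->notifier,
							&my_nb);
	}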
int arizona_clk32k_enable(struct arizona *arizona);
int arizona_clk32k_disable(struct arizona *arizona);
static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
unsigned reg_cnt, unsigned char *val)
{
- int ret;
+ int ret = 0;
int i;
for (i = 0; i < reg_cnt; i++) {
enum {
MLX4_INTERFACE_STATE_UP = 1 << 0,
MLX4_INTERFACE_STATE_DELETION = 1 << 1,
+ MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2,
};
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
u8 rsvd[8];
};
-#define MLX5_CMD_OP_MAX 0x920
-
enum {
VPORT_STATE_DOWN = 0x0,
VPORT_STATE_UP = 0x1,
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
+
+#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
void *uout;
int uout_size;
mlx5_cmd_cbk_t callback;
+ struct delayed_work cb_timeout_work;
void *context;
int idx;
struct completion done;
MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
- MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c
+ MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLX5_CMD_OP_MAX
};
struct mlx5_ifc_flow_table_fields_supported_bits {
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
- u8 reserved_at_5[0x1b];
+ u8 reserved_at_5[0x19];
+ u8 nic_vport_node_guid_modify[0x1];
+ u8 nic_vport_port_guid_modify[0x1];
u8 reserved_at_20[0x7e0];
};
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_at_0[0x19];
+ u8 reserved_at_0[0x16];
+ u8 node_guid[0x1];
+ u8 port_guid[0x1];
+ u8 reserved_at_18[0x1];
u8 mtu[0x1];
u8 change_event[0x1];
u8 promisc[0x1];
enum {
MLX5_FENCE_MODE_NONE = 0 << 5,
MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
+ MLX5_FENCE_MODE_FENCE = 2 << 5,
MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
};
};
struct mlx5_qp_path {
- u8 fl;
+ u8 fl_free_ar;
u8 rsvd3;
- u8 free_ar;
- u8 pkey_index;
+ __be16 pkey_index;
u8 rsvd0;
u8 grh_mlid;
__be16 rlid;
__be32 optparam;
u8 rsvd0[4];
struct mlx5_qp_context ctx;
+ u8 rsvd2[16];
};
struct mlx5_modify_qp_mbox_out {
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 node_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
}
void do_set_pte(struct vm_area_struct *vma, unsigned long address,
- struct page *page, pte_t *pte, bool write, bool anon, bool old);
+ struct page *page, pte_t *pte, bool write, bool anon);
#endif
/*
#define LOOKUP_ROOT 0x2000
#define LOOKUP_EMPTY 0x4000
+extern int path_pts(struct path *path);
+
extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
net_ratelimit()) \
- __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
+ ##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}
+/* Return true if dev can't cope with MTU-sized frames that need VLAN tag insertion */
+static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
+{
+ /* TODO: reserve and use an additional IFF bit, if we get more users */
+ return dev->priv_flags & IFF_MACSEC;
+}
+
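A hedged sketch of the intended call site, where a VLAN upper device sizes its MTU against the lower device (identifiers are illustrative):

	int mtu = real_dev->mtu;

	/* leave room for the tag if the device cannot handle
	 * full-MTU frames that still need VLAN insertion
	 */
	if (netif_reduces_vlan_mtu(real_dev))
		mtu -= VLAN_HLEN;
	vlan_dev->mtu = mtu;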
extern struct pernet_operations __net_initdata loopback_net_ops;
/* Logging, debugging and troubleshooting/diagnostic helpers. */
#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
#endif
-/* Default string compare functions, Allow arch asm/prom.h to override */
-#if !defined(of_compat_cmp)
-#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2))
-#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
-#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
-#endif
-
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
return NULL;
}
-static inline int of_parse_phandle_with_args(struct device_node *np,
+static inline int of_parse_phandle_with_args(const struct device_node *np,
const char *list_name,
const char *cells_name,
int index,
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
+/* Default string compare functions; allow arch asm/prom.h to override */
+#if !defined(of_compat_cmp)
+#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2))
+#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
+#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
+#endif
+
#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
extern int of_node_to_nid(struct device_node *np);
#else
struct of_phandle_args;
struct device_node;
-#ifdef CONFIG_OF
+#ifdef CONFIG_OF_PCI
int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn);
int of_reserved_mem_device_init(struct device *dev);
void of_reserved_mem_device_release(struct device *dev);
+int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+ phys_addr_t align,
+ phys_addr_t start,
+ phys_addr_t end,
+ bool nomap,
+ phys_addr_t *res_base);
+
void fdt_init_reserved_mem(void);
void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
phys_addr_t base, phys_addr_t size);
static inline bool page_is_young(struct page *page)
{
- return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return false;
+
+ return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
static inline void set_page_young(struct page *page)
{
- set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+
+ set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
static inline bool test_and_clear_page_young(struct page *page)
{
- return test_and_clear_bit(PAGE_EXT_YOUNG,
- &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return false;
+
+ return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
static inline bool page_is_idle(struct page *page)
{
- return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return false;
+
+ return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
static inline void set_page_idle(struct page *page)
{
- set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+
+ set_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
static inline void clear_page_idle(struct page *page)
{
- clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+
+ clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
#endif /* CONFIG_64BIT */
};
struct posix_acl {
- union {
- atomic_t a_refcount;
- struct rcu_head a_rcu;
- };
+ atomic_t a_refcount;
+ struct rcu_head a_rcu;
unsigned int a_count;
struct posix_acl_entry a_entries[0];
};
for (child = device_get_next_child_node(dev, NULL); child; \
child = device_get_next_child_node(dev, child))
+struct fwnode_handle *device_get_named_child_node(struct device *dev,
+ const char *childname);
+
void fwnode_handle_put(struct fwnode_handle *fwnode);
unsigned int device_get_child_node_count(struct device *dev);
if (!pwm)
return -EINVAL;
+ if (duty_ns < 0 || period_ns < 0)
+ return -EINVAL;
+
pwm_get_state(pwm, &state);
if (state.duty_cycle == duty_ns && state.period == period_ns)
return 0;
static inline void pwm_apply_args(struct pwm_device *pwm)
{
+ struct pwm_state state = { };
+
/*
* PWM users calling pwm_apply_args() expect to have a fresh config
* where the polarity and period are set according to pwm_args info.
* at startup (even if they are actually enabled), thus authorizing
* polarity setting.
*
- * Instead of setting ->enabled to false, we call pwm_disable()
- * before pwm_set_polarity() to ensure that everything is configured
- * as expected, and the PWM is really disabled when the user request
- * it.
+ * To fulfill this requirement, we apply a new state which disables
+ * the PWM device and sets the reference period and polarity config.
*
* Note that PWM users requiring a smooth handover between the
* bootloader and the kernel (like critical regulators controlled by
* PWM devices) will have to switch to the atomic API and avoid calling
* pwm_apply_args().
*/
- pwm_disable(pwm);
- pwm_set_polarity(pwm, pwm->args.polarity);
+
+ state.enabled = false;
+ state.polarity = pwm->args.polarity;
+ state.period = pwm->args.period;
+
+ pwm_apply_state(pwm, &state);
}
struct pwm_lookup {
bool drop_ttl0;
u8 vport_id;
u16 mtu;
+ bool clear_stats;
};
struct qed_stop_rxq_params {
void **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
iter->next_index = iter->index;
+ iter->tags = 0;
return NULL;
}
extern struct lock_class_key reservation_seqcount_class;
extern const char reservation_seqcount_string[];
+/**
+ * struct reservation_object_list - a list of shared fences
+ * @rcu: for internal use
+ * @shared_count: number of fences in the shared table
+ * @shared_max: for growing shared fence table
+ * @shared: shared fence table
+ */
struct reservation_object_list {
struct rcu_head rcu;
u32 shared_count, shared_max;
struct fence __rcu *shared[];
};
+/**
+ * struct reservation_object - a reservation object manages fences for a buffer
+ * @lock: update side lock
+ * @seq: sequence count for managing RCU read-side synchronization
+ * @fence_excl: the exclusive fence, if there is one currently
+ * @fence: list of current shared fences
+ * @staged: staged copy of shared fences for RCU updates
+ */
struct reservation_object {
struct ww_mutex lock;
seqcount_t seq;
#define reservation_object_assert_held(obj) \
lockdep_assert_held(&(obj)->lock.base)
+/**
+ * reservation_object_init - initialize a reservation object
+ * @obj: the reservation object
+ */
static inline void
reservation_object_init(struct reservation_object *obj)
{
obj->staged = NULL;
}
+/**
+ * reservation_object_fini - destroys a reservation object
+ * @obj: the reservation object
+ */
static inline void
reservation_object_fini(struct reservation_object *obj)
{
ww_mutex_destroy(&obj->lock);
}
+/**
+ * reservation_object_get_list - get the reservation object's
+ * shared fence list, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the shared fence list. Does NOT take references to
+ * the fence. The obj->lock must be held.
+ */
static inline struct reservation_object_list *
reservation_object_get_list(struct reservation_object *obj)
{
reservation_object_held(obj));
}
+/**
+ * reservation_object_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any). Does NOT take a
+ * reference. The obj->lock must be held.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
static inline struct fence *
reservation_object_get_excl(struct reservation_object *obj)
{
reservation_object_held(obj));
}
+/**
+ * reservation_object_get_excl_rcu - get the reservation object's
+ * exclusive fence, without lock held.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments its
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
static inline struct fence *
reservation_object_get_excl_rcu(struct reservation_object *obj)
{
#endif /* CONFIG_RESET_CONTROLLER */
/**
- * reset_control_get - Lookup and obtain an exclusive reference to a
- * reset controller.
+ * reset_control_get_exclusive - Lookup and obtain an exclusive reference
+ * to a reset controller.
* @dev: device to be reset by the controller
* @id: reset line name
*
*
* Use of id names is optional.
*/
-static inline struct reset_control *__must_check reset_control_get(
- struct device *dev, const char *id)
+static inline struct reset_control *
+__must_check reset_control_get_exclusive(struct device *dev, const char *id)
{
#ifndef CONFIG_RESET_CONTROLLER
WARN_ON(1);
return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
}
-static inline struct reset_control *reset_control_get_optional(
- struct device *dev, const char *id)
-{
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
-}
-
/**
* reset_control_get_shared - Lookup and obtain a shared reference to a
* reset controller.
return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
}
+static inline struct reset_control *reset_control_get_optional_exclusive(
+ struct device *dev, const char *id)
+{
+ return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
+}
+
+static inline struct reset_control *reset_control_get_optional_shared(
+ struct device *dev, const char *id)
+{
+ return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
+}
+
/**
- * of_reset_control_get - Lookup and obtain an exclusive reference to a
- * reset controller.
+ * of_reset_control_get_exclusive - Lookup and obtain an exclusive reference
+ * to a reset controller.
* @node: device to be reset by the controller
* @id: reset line name
*
*
* Use of id names is optional.
*/
-static inline struct reset_control *of_reset_control_get(
+static inline struct reset_control *of_reset_control_get_exclusive(
struct device_node *node, const char *id)
{
return __of_reset_control_get(node, id, 0, 0);
}
/**
- * of_reset_control_get_by_index - Lookup and obtain an exclusive reference to
- * a reset controller by index.
+ * of_reset_control_get_shared - Lookup and obtain a shared reference
+ * to a reset controller.
+ * @node: device to be reset by the controller
+ * @id: reset line name
+ *
+ * When a reset-control is shared, the behavior of reset_control_assert /
+ * deassert is changed; the reset core will keep track of a deassert_count
+ * and only (re-)assert the reset after reset_control_assert has been called
+ * as many times as reset_control_deassert was called. Also see the remark
+ * about shared reset-controls in the reset_control_assert docs.
+ *
+ * Calling reset_control_assert without first calling reset_control_deassert
+ * is not allowed on a shared reset control. Calling reset_control_reset is
+ * also not allowed on a shared reset control.
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *of_reset_control_get_shared(
+ struct device_node *node, const char *id)
+{
+ return __of_reset_control_get(node, id, 0, 1);
+}
+
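To make the shared semantics concrete, a hedged consumer sketch (the "bus" line name is hypothetical):

	struct reset_control *rst;

	rst = devm_reset_control_get_shared(dev, "bus");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_deassert(rst);
	/* ... use the device; other sharers may keep it deasserted ... */
	reset_control_assert(rst);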
+/**
+ * of_reset_control_get_exclusive_by_index - Lookup and obtain an exclusive
+ * reference to a reset controller
+ * by index.
* @node: device to be reset by the controller
* @index: index of the reset controller
*
* in whatever order. Returns a struct reset_control or IS_ERR() condition
* containing errno.
*/
-static inline struct reset_control *of_reset_control_get_by_index(
+static inline struct reset_control *of_reset_control_get_exclusive_by_index(
struct device_node *node, int index)
{
return __of_reset_control_get(node, NULL, index, 0);
}
/**
- * devm_reset_control_get - resource managed reset_control_get()
- * @dev: device to be reset by the controller
- * @id: reset line name
+ * of_reset_control_get_shared_by_index - Lookup and obtain a shared
+ * reference to a reset controller
+ * by index.
+ * @node: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * When a reset-control is shared, the behavior of reset_control_assert /
+ * deassert is changed; the reset core will keep track of a deassert_count
+ * and only (re-)assert the reset after reset_control_assert has been called
+ * as many times as reset_control_deassert was called. Also see the remark
+ * about shared reset-controls in the reset_control_assert docs.
+ *
+ * Calling reset_control_assert without first calling reset_control_deassert
+ * is not allowed on a shared reset control. Calling reset_control_reset is
+ * also not allowed on a shared reset control.
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
*
- * Managed reset_control_get(). For reset controllers returned from this
- * function, reset_control_put() is called automatically on driver detach.
- * See reset_control_get() for more information.
+ * This is to be used to perform a list of resets for a device or power domain
+ * in whatever order.
*/
-static inline struct reset_control *__must_check devm_reset_control_get(
- struct device *dev, const char *id)
-{
-#ifndef CONFIG_RESET_CONTROLLER
- WARN_ON(1);
-#endif
- return __devm_reset_control_get(dev, id, 0, 0);
-}
-
-static inline struct reset_control *devm_reset_control_get_optional(
- struct device *dev, const char *id)
+static inline struct reset_control *of_reset_control_get_shared_by_index(
+ struct device_node *node, int index)
{
- return __devm_reset_control_get(dev, id, 0, 0);
+ return __of_reset_control_get(node, NULL, index, 1);
}
/**
- * devm_reset_control_get_by_index - resource managed reset_control_get
+ * devm_reset_control_get_exclusive - resource managed
+ * reset_control_get_exclusive()
* @dev: device to be reset by the controller
- * @index: index of the reset controller
+ * @id: reset line name
*
- * Managed reset_control_get(). For reset controllers returned from this
- * function, reset_control_put() is called automatically on driver detach.
- * See reset_control_get() for more information.
+ * Managed reset_control_get_exclusive(). For reset controllers returned
+ * from this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_get_exclusive() for more information.
*/
-static inline struct reset_control *devm_reset_control_get_by_index(
- struct device *dev, int index)
+static inline struct reset_control *
+__must_check devm_reset_control_get_exclusive(struct device *dev,
+ const char *id)
{
- return __devm_reset_control_get(dev, NULL, index, 0);
+#ifndef CONFIG_RESET_CONTROLLER
+ WARN_ON(1);
+#endif
+ return __devm_reset_control_get(dev, id, 0, 0);
}
/**
return __devm_reset_control_get(dev, id, 0, 1);
}
+static inline struct reset_control *devm_reset_control_get_optional_exclusive(
+ struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, 0);
+}
+
+static inline struct reset_control *devm_reset_control_get_optional_shared(
+ struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, 1);
+}
+
+/**
+ * devm_reset_control_get_exclusive_by_index - resource managed
+ * reset_control_get_exclusive()
+ * @dev: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * Managed reset_control_get_exclusive(). For reset controllers returned from
+ * this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_get_exclusive() for more information.
+ */
+static inline struct reset_control *
+devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
+{
+ return __devm_reset_control_get(dev, NULL, index, 0);
+}
+
/**
* devm_reset_control_get_shared_by_index - resource managed
* reset_control_get_shared
* this function, reset_control_put() is called automatically on driver detach.
* See reset_control_get_shared() for more information.
*/
-static inline struct reset_control *devm_reset_control_get_shared_by_index(
- struct device *dev, int index)
+static inline struct reset_control *
+devm_reset_control_get_shared_by_index(struct device *dev, int index)
{
return __devm_reset_control_get(dev, NULL, index, 1);
}
+/*
+ * TEMPORARY calls to use during transition:
+ *
+ * of_reset_control_get() => of_reset_control_get_exclusive()
+ *
+ * These inline function calls will be removed once all consumers
+ * have been moved over to the new explicit API.
+ */
+static inline struct reset_control *reset_control_get(
+ struct device *dev, const char *id)
+{
+ return reset_control_get_exclusive(dev, id);
+}
+
+static inline struct reset_control *reset_control_get_optional(
+ struct device *dev, const char *id)
+{
+ return reset_control_get_optional_exclusive(dev, id);
+}
+
+static inline struct reset_control *of_reset_control_get(
+ struct device_node *node, const char *id)
+{
+ return of_reset_control_get_exclusive(node, id);
+}
+
+static inline struct reset_control *of_reset_control_get_by_index(
+ struct device_node *node, int index)
+{
+ return of_reset_control_get_exclusive_by_index(node, index);
+}
+
+static inline struct reset_control *devm_reset_control_get(
+ struct device *dev, const char *id)
+{
+ return devm_reset_control_get_exclusive(dev, id);
+}
+
+static inline struct reset_control *devm_reset_control_get_optional(
+ struct device *dev, const char *id)
+{
+ return devm_reset_control_get_optional_exclusive(dev, id);
+}
+
+static inline struct reset_control *devm_reset_control_get_by_index(
+ struct device *dev, int index)
+{
+ return devm_reset_control_get_exclusive_by_index(dev, index);
+}
#endif
/*
* rmap interfaces called when adding or removing pte of page
*/
-void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
-extern void thread_info_cache_init(void);
+extern void thread_stack_cache_init(void);
#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
__u8 sctpi_s_disable_fragments;
__u8 sctpi_s_v4mapped;
__u8 sctpi_s_frag_interleave;
+ __u32 sctpi_s_type;
+ __u32 __reserved3;
};
struct sctp_infox {
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
- return lockless_dereference(s)->sequence;
+ int seq = READ_ONCE(s->sequence);
+ /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
+ smp_read_barrier_depends();
+ return seq;
}
/**
* unsigned seq, idx;
*
* do {
- * seq = lockless_dereference(latch)->seq;
+ * seq = raw_read_seqcount_latch(&latch->seq);
*
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
}
void __skb_get_hash(struct sk_buff *skb);
+u32 __skb_get_hash_symmetric(struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
const struct flow_keys *keys, int hlen);
skb->csum = csum_partial(start, len, skb->csum);
}
+/**
+ * skb_push_rcsum - push skb and update receive checksum
+ * @skb: buffer to update
+ * @len: length of data pushed
+ *
+ * This function performs an skb_push on the packet and updates
+ * the CHECKSUM_COMPLETE checksum. It should be used on
+ * receive path processing instead of skb_push unless you know
+ * that the checksum difference is zero (e.g., a valid IP header)
+ * or you are setting ip_summed to CHECKSUM_NONE.
+ */
+static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
+ unsigned int len)
+{
+ skb_push(skb, len);
+ skb_postpush_rcsum(skb, skb->data, len);
+ return skb->data;
+}
+
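A hedged receive-path sketch pairing it with skb_pull_rcsum() (struct my_hdr and the drop label are hypothetical):

	if (!pskb_may_pull(skb, sizeof(struct my_hdr)))
		goto drop;
	skb_pull_rcsum(skb, sizeof(struct my_hdr));
	/* ... inspect the inner packet ... */
	skb_push_rcsum(skb, sizeof(struct my_hdr));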
/**
* pskb_trim_rcsum - trim received skb and update checksum
* @skb: buffer to trim
{
switch (sk->sk_family) {
case AF_INET:
+ if (sk->sk_type == SOCK_RAW)
+ return SKNLGRP_NONE;
+
switch (sk->sk_protocol) {
case IPPROTO_TCP:
return SKNLGRP_INET_TCP_DESTROY;
return SKNLGRP_NONE;
}
case AF_INET6:
+ if (sk->sk_type == SOCK_RAW)
+ return SKNLGRP_NONE;
+
switch (sk->sk_protocol) {
case IPPROTO_TCP:
return SKNLGRP_INET6_TCP_DESTROY;
#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
- struct rpc_xprt *xprt);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
const struct rpc_program *, u32);
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
struct net *xpt_net;
struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */
+ struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */
};
static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
size_t addrlen;
const char *servername;
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
+ struct rpc_xprt_switch *bc_xps;
unsigned int flags;
};
* @get_trend: a pointer to a function that reads the sensor temperature trend.
* @set_emul_temp: a pointer to a function that sets sensor emulated
* temperature.
+ * @set_trip_temp: a pointer to a function that sets the trip temperature on
+ * hardware.
*/
struct thermal_zone_of_device_ops {
int (*get_temp)(void *, int *);
struct timespec64 ts64;
if (!tv)
+ return do_sys_settimeofday64(NULL, tz);
+
+ if (!timespec_valid(tv))
return -EINVAL;
ts64 = timespec_to_timespec64(*tv);
* PORTSCx
*/
/* HOSTPC: offset 0x84 */
- u32 hostpc[1]; /* HOSTPC extension */
+ u32 hostpc[0]; /* HOSTPC extension */
#define HOSTPC_PHCD (1<<22) /* Phy clock disable */
#define HOSTPC_PSPD (3<<25) /* Port speed detection */
- u32 reserved5[16];
+ u32 reserved5[17];
/* USBMODE_EX: offset 0xc8 */
u32 usbmode_ex; /* USB Device mode extension */
* @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL,
* this driver will be bound to any available UDC.
* @pending: UDC core private data used for deferred probe of this driver.
+ * @match_existing_only: If udc is not found, return an error and don't add this
+ * gadget driver to the list of pending drivers.
*
* Devices are disabled till a gadget driver successfully bind()s, which
* means the driver will handle setup() requests needed to enumerate (and
char *udc_name;
struct list_head pending;
+ unsigned match_existing_only:1;
};
};
#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
-void musb_mailbox(enum musb_vbus_id_status status);
+int musb_mailbox(enum musb_vbus_id_status status);
#else
-static inline void musb_mailbox(enum musb_vbus_id_status status)
+static inline int musb_mailbox(enum musb_vbus_id_status status)
{
+ return 0;
}
#endif
/*
* v4l2-mc.h - Media Controller V4L2 types and prototypes
*
- * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
* Copyright (C) 2006-2010 Nokia Corporation
* Copyright (c) 2016 Intel Corporation.
*
#define BOND_DEFAULT_MIIMON 100
+#ifndef __long_aligned
+#define __long_aligned __attribute__((aligned((sizeof(long)))))
+#endif
/*
* Less bad way to call ioctl from within the kernel; this needs to be
* done some other way to get the call out of interrupt context.
struct reciprocal_value reciprocal_packets_per_slave;
u16 ad_actor_sys_prio;
u16 ad_user_port_key;
- u8 ad_actor_system[ETH_ALEN];
+
+ /* 2 bytes of padding : see ether_addr_equal_64bits() */
+ u8 ad_actor_system[ETH_ALEN + 2];
};
struct bond_parm_tbl {
int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
struct sockaddr __user **, struct iovec **);
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
unsigned int);
asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
u8 name_assign_type);
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
- bool *csum_err, __be16 proto);
+ bool *csum_err, __be16 proto, int nhs);
static inline int gre_calc_hlen(__be16 o_flags)
{
return min(dst->dev->mtu, IP_MAX_MTU);
}
-static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
+static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+ const struct sk_buff *skb)
{
- struct sock *sk = skb->sk;
-
if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
u8 *protocol, struct flowi6 *fl6);
};
+#ifdef CONFIG_INET
+
extern const struct ip6_tnl_encap_ops __rcu *
ip6tun_encaps[MAX_IPTUN_ENCAP_OPS];
int ip6_tnl_get_iflink(const struct net_device *dev);
int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
-#ifdef CONFIG_INET
static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
struct net_device *dev)
{
const char *ip_vs_state_name(__u16 proto, int state);
void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
-int ip_vs_check_template(struct ip_vs_conn *ct);
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
int ip_vs_conn_init(void);
void ip_vs_conn_cleanup(void);
return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
}
+/* jiffies until ct expires, 0 if already expired */
+static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
+{
+ long timeout = (long)ct->timeout.expires - (long)jiffies;
+
+ return timeout > 0 ? timeout : 0;
+}
+
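A hedged sketch of the typical consumer, e.g. a /proc dump printing the remaining lifetime in seconds:

	seq_printf(s, "%lu ", nf_ct_expires(ct) / HZ);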
struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
struct nf_hook_ops *ops);
};
-void nf_register_queue_handler(const struct nf_queue_handler *qh);
-void nf_unregister_queue_handler(void);
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
+void nf_unregister_queue_handler(struct net *net);
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
struct nft_set;
struct nft_set_iter {
+ u8 genmask;
unsigned int count;
unsigned int skip;
int err;
struct proc_dir_entry;
struct nf_logger;
+struct nf_queue_handler;
struct netns_nf {
#if defined CONFIG_PROC_FS
struct proc_dir_entry *proc_netfilter;
#endif
+ const struct nf_queue_handler __rcu *queue_handler;
const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
};
};
-static inline bool tc_should_offload(struct net_device *dev, u32 flags)
+static inline bool tc_should_offload(const struct net_device *dev,
+ const struct tcf_proto *tp, u32 flags)
{
+ const struct Qdisc *sch = tp->q;
+ const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
+
if (!(dev->features & NETIF_F_HW_TC))
return false;
-
if (flags & TCA_CLS_FLAGS_SKIP_HW)
return false;
-
if (!dev->netdev_ops->ndo_setup_tc)
return false;
+ if (cops && cops->tcf_cl_offload)
+ return cops->tcf_cl_offload(tp->classid);
return true;
}
}
struct qdisc_watchdog {
+ u64 last_expires;
struct hrtimer timer;
struct Qdisc *qdisc;
};
/* Filter manipulation */
struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
+ bool (*tcf_cl_offload)(u32 classid);
unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
u32 classid);
void (*unbind_tcf)(struct Qdisc *, unsigned long);
/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
if (!sch->gso_skb) {
sch->gso_skb = sch->dequeue(sch);
- if (sch->gso_skb)
+ if (sch->gso_skb) {
/* it's still part of the queue */
+ qdisc_qstats_backlog_inc(sch, sch->gso_skb);
sch->q.qlen++;
+ }
}
return sch->gso_skb;
if (skb) {
sch->gso_skb = NULL;
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
} else {
skb = sch->dequeue(sch);
*/
void sock_gen_put(struct sock *sk);
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
+ unsigned int trim_cap);
+static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+ const int nested)
+{
+ return __sk_receive_skb(sk, skb, nested, 1);
+}
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
u8 stp_state; /* PORT_STP_STATE */
unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
- u32 ageing_time; /* BRIDGE_AGEING_TIME */
+ clock_t ageing_time; /* BRIDGE_AGEING_TIME */
bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
} u;
};
int (*encode)(struct sk_buff *, void *, struct tcf_meta_info *);
int (*decode)(struct sk_buff *, void *, u16 len);
int (*get)(struct sk_buff *skb, struct tcf_meta_info *mi);
- int (*alloc)(struct tcf_meta_info *, void *);
+ int (*alloc)(struct tcf_meta_info *, void *, gfp_t);
void (*release)(struct tcf_meta_info *);
int (*validate)(void *val, int len);
struct module *owner;
int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
const void *dval);
-int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval);
-int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval);
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
int ife_validate_meta_u32(void *val, int len);
IB_DEVICE_CROSS_CHANNEL = (1 << 27),
IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
- IB_DEVICE_ON_DEMAND_PAGING = (1 << 31),
+ IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
- IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33),
- IB_DEVICE_RAW_SCATTER_FCS = ((u64)1 << 34),
+ IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
+ IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
};
enum ib_signature_prot_cap {
/*
* Allocate a private queue pair data structure for driver specific
- * information which is opaque to rdmavt.
+ * information which is opaque to rdmavt. Errors are returned via
+ * ERR_PTR(err). The driver is free to return NULL or a valid
+ * pointer.
*/
void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
gfp_t gfp);
* @ops: pointer to DSP callbacks
* @runtime: pointer to runtime structure
* @device: device pointer
+ * @error_work: delayed work used when closing the stream due to an error
* @direction: stream direction, playback/recording
* @metadata_set: metadata set flag, true when set
* @next_track: has userspace signal next track transition, true when set
struct snd_compr_ops *ops;
struct snd_compr_runtime *runtime;
struct snd_compr *device;
+ struct delayed_work error_work;
enum snd_compr_direction direction;
bool metadata_set;
bool next_track;
wake_up(&stream->runtime->sleep);
}
+int snd_compr_stop_error(struct snd_compr_stream *stream,
+ snd_pcm_state_t state);
+
#endif
--- /dev/null
+/*
+ * linux/sound/cs35l33.h -- Platform data for CS35L33
+ *
+ * Copyright (c) 2016 Cirrus Logic Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CS35L33_H
+#define __CS35L33_H
+
+struct cs35l33_hg {
+ bool enable_hg_algo;
+ unsigned int mem_depth;
+ unsigned int release_rate;
+ unsigned int hd_rm;
+ unsigned int ldo_thld;
+ unsigned int ldo_path_disable;
+ unsigned int ldo_entry_delay;
+ bool vp_hg_auto;
+ unsigned int vp_hg;
+ unsigned int vp_hg_rate;
+ unsigned int vp_hg_va;
+};
+
+struct cs35l33_pdata {
+ /* Boost Controller Voltage Setting */
+ unsigned int boost_ctl;
+
+ /* Boost Controller Peak Current */
+ unsigned int boost_ipk;
+
+ /* Amplifier Drive Select */
+ unsigned int amp_drv_sel;
+
+ /* soft volume ramp */
+ unsigned int ramp_rate;
+
+ /* IMON adc scale */
+ unsigned int imon_adc_scale;
+
+ /* H/G algo configuration */
+ struct cs35l33_hg hg_config;
+};
+
+#endif /* __CS35L33_H */
int channels;
};
+struct hdmi_codec_pdata;
struct hdmi_codec_ops {
/*
* Called when ASoC starts an audio stream setup.
* Optional
*/
- int (*audio_startup)(struct device *dev);
+ int (*audio_startup)(struct device *dev, void *data);
/*
* Configures HDMI-encoder for audio stream.
* Mandatory
*/
- int (*hw_params)(struct device *dev,
+ int (*hw_params)(struct device *dev, void *data,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms);
* Shuts down the audio stream.
* Mandatory
*/
- void (*audio_shutdown)(struct device *dev);
+ void (*audio_shutdown)(struct device *dev, void *data);
/*
* Mute/unmute HDMI audio stream.
* Optional
*/
- int (*digital_mute)(struct device *dev, bool enable);
+ int (*digital_mute)(struct device *dev, void *data, bool enable);
/*
* Provides EDID-Like-Data from connected HDMI device.
* Optional
*/
- int (*get_eld)(struct device *dev, uint8_t *buf, size_t len);
+ int (*get_eld)(struct device *dev, void *data,
+ uint8_t *buf, size_t len);
};
/* HDMI codec initialization data */
uint i2s:1;
uint spdif:1;
int max_i2s_channels;
+ void *data;
};
#define HDMI_CODEC_DRV_NAME "hdmi-audio-codec"
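With the extra cookie, callbacks can recover driver state from hdmi_codec_pdata.data instead of dev_get_drvdata(); a hedged sketch where struct my_encoder and my_encoder_setup are hypothetical:

	static int my_hw_params(struct device *dev, void *data,
				struct hdmi_codec_daifmt *fmt,
				struct hdmi_codec_params *hparms)
	{
		struct my_encoder *enc = data;	/* from pdata->data */

		return my_encoder_setup(enc, fmt, hparms);
	}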
#define __SIMPLE_CARD_H
#include <sound/soc.h>
-
-struct asoc_simple_dai {
- const char *name;
- unsigned int sysclk;
- int slots;
- int slot_width;
- unsigned int tx_slot_mask;
- unsigned int rx_slot_mask;
- struct clk *clk;
-};
+#include <sound/simple_card_utils.h>
struct asoc_simple_card_info {
const char *name;
--- /dev/null
+/*
+ * simple_card_utils.h
+ *
+ * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SIMPLE_CARD_UTILS_H
+#define __SIMPLE_CARD_UTILS_H
+
+#include <sound/soc.h>
+
+struct asoc_simple_dai {
+ const char *name;
+ unsigned int sysclk;
+ int slots;
+ int slot_width;
+ unsigned int tx_slot_mask;
+ unsigned int rx_slot_mask;
+ struct clk *clk;
+};
+
+int asoc_simple_card_parse_daifmt(struct device *dev,
+ struct device_node *node,
+ struct device_node *codec,
+ char *prefix,
+ unsigned int *retfmt);
+int asoc_simple_card_set_dailink_name(struct device *dev,
+ struct snd_soc_dai_link *dai_link,
+ const char *fmt, ...);
+int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
+ char *prefix);
+
+#endif /* __SIMPLE_CARD_UTILS_H */
struct regulator;
struct snd_soc_dapm_widget_list;
struct snd_soc_dapm_update;
+enum snd_soc_dapm_direction;
int dapm_regulator_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_widget *widget,
int num);
+struct snd_soc_dapm_widget *snd_soc_dapm_new_control(
+ struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_widget *widget);
int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
struct snd_soc_dai *dai);
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
/* dapm path query */
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
- struct snd_soc_dapm_widget_list **list);
+ struct snd_soc_dapm_widget_list **list,
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *,
+ enum snd_soc_dapm_direction));
struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
struct snd_kcontrol *kcontrol);
.get = snd_soc_get_volsw, .put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_R_S_VALUE(reg_left, reg_right, xshift, \
xmin, xmax, xsign_bit, xinvert) }
+#define SOC_SINGLE_S8_TLV(xname, xreg, xmin, xmax, tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
+ .put = snd_soc_put_volsw, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) \
+ {.reg = xreg, .rreg = xreg, \
+ .min = xmin, .max = xmax, .platform_max = xmax, \
+ .sign_bit = 7,} }
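A hedged example of declaring a control with the new macro; the register name and dB scale are illustrative:

	static const DECLARE_TLV_DB_SCALE(my_dig_tlv, -12800, 50, 0);

	static const struct snd_kcontrol_new my_controls[] = {
		SOC_SINGLE_S8_TLV("Digital Volume", MY_REG_DVOL,
				  -128, 127, my_dig_tlv),
	};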
#define SOC_DOUBLE_S8_TLV(xname, xreg, xmin, xmax, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
header-y += hw_breakpoint.h
header-y += l2tp.h
header-y += libc-compat.h
+header-y += lirc.h
header-y += limits.h
header-y += llc.h
header-y += loop.h
};
union {
char name[BTRFS_SUBVOL_NAME_MAX + 1];
- u64 devid;
+ __u64 devid;
};
};
ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28,
ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29,
ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39,
/* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
* 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
*/
__ETHTOOL_LINK_MODE_LAST
- = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+ = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};
#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
*
* 7.24
* - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support
+ *
+ * 7.25
+ * - add FUSE_PARALLEL_DIROPS
*/
#ifndef _LINUX_FUSE_H
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 24
+#define FUSE_KERNEL_MINOR_VERSION 25
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
* FUSE_ASYNC_DIO: asynchronous direct I/O submission
* FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
* FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
+ * FUSE_PARALLEL_DIROPS: allow parallel lookups and readdir
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
#define FUSE_ASYNC_DIO (1 << 15)
#define FUSE_WRITEBACK_CACHE (1 << 16)
#define FUSE_NO_OPEN_SUPPORT (1 << 17)
+#define FUSE_PARALLEL_DIROPS (1 << 18)
/**
* CUSE INIT request/reply flags
#ifndef _UAPI_LINUX_GTP_H_
-#define _UAPI_LINUX_GTP_H__
+#define _UAPI_LINUX_GTP_H_
enum gtp_genl_cmds {
GTP_CMD_NEWPDP,
#define KEY_KBDINPUTASSIST_ACCEPT 0x264
#define KEY_KBDINPUTASSIST_CANCEL 0x265
+/* Diagonal movement keys */
+#define KEY_RIGHT_UP 0x266
+#define KEY_RIGHT_DOWN 0x267
+#define KEY_LEFT_UP 0x268
+#define KEY_LEFT_DOWN 0x269
+
+#define KEY_ROOT_MENU 0x26a /* Show Device's Root Menu */
+/* Show Top Menu of the Media (e.g. DVD) */
+#define KEY_MEDIA_TOP_MENU 0x26b
+#define KEY_NUMERIC_11 0x26c
+#define KEY_NUMERIC_12 0x26d
+/*
+ * Toggle Audio Description: refers to an audio service that helps blind and
+ * visually impaired consumers understand the action in a program. Note: in
+ * some countries this is referred to as "Video Description".
+ */
+#define KEY_AUDIO_DESC 0x26e
+#define KEY_3D_MODE 0x26f
+#define KEY_NEXT_FAVORITE 0x270
+#define KEY_STOP_RECORD 0x271
+#define KEY_PAUSE_RECORD 0x272
+#define KEY_VOD 0x273 /* Video on Demand */
+#define KEY_UNMUTE 0x274
+#define KEY_FASTREVERSE 0x275
+#define KEY_SLOWREVERSE 0x276
+/*
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+#define KEY_DATA 0x277
+
#define BTN_TRIGGER_HAPPY 0x2c0
#define BTN_TRIGGER_HAPPY1 0x2c0
#define BTN_TRIGGER_HAPPY2 0x2c1
#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */
#define SW_LINEIN_INSERT 0x0d /* set = inserted */
#define SW_MUTE_DEVICE 0x0e /* set = device disabled */
+#define SW_PEN_INSERTED 0x0f /* set = pen inserted */
#define SW_MAX 0x0f
#define SW_CNT (SW_MAX+1)
#define BUS_ATARI 0x1B
#define BUS_SPI 0x1C
#define BUS_RMI 0x1D
+#define BUS_CEC 0x1E
/*
* MT_TOOL types
header-y += xt_NFQUEUE.h
header-y += xt_RATEEST.h
header-y += xt_SECMARK.h
+header-y += xt_SYNPROXY.h
header-y += xt_TCPMSS.h
header-y += xt_TCPOPTSTRIP.h
header-y += xt_TEE.h
#ifndef _XT_SYNPROXY_H
#define _XT_SYNPROXY_H
+#include <linux/types.h>
+
#define XT_SYNPROXY_OPT_MSS 0x01
#define XT_SYNPROXY_OPT_WSCALE 0x02
#define XT_SYNPROXY_OPT_SACK_PERM 0x04
TCA_POLICE_PEAKRATE,
TCA_POLICE_AVRATE,
TCA_POLICE_RESULT,
+ TCA_POLICE_TM,
+ TCA_POLICE_PAD,
__TCA_POLICE_MAX
#define TCA_POLICE_RESULT TCA_POLICE_RESULT
};
TCA_U32_DIVISOR,
TCA_U32_SEL,
TCA_U32_POLICE,
- TCA_U32_ACT,
+ TCA_U32_ACT,
TCA_U32_INDEV,
TCA_U32_PCNT,
TCA_U32_MARK,
# UAPI Header export list
header-y += asequencer.h
+header-y += asoc.h
header-y += asound.h
header-y += asound_fm.h
header-y += compress_offload.h
header-y += hdspm.h
header-y += sb16_csp.h
header-y += sfnt_info.h
+header-y += tlv.h
+header-y += usb_stream.h
config KALLSYMS_ABSOLUTE_PERCPU
bool
+ depends on KALLSYMS
default X86_64 && SMP
config KALLSYMS_BASE_RELATIVE
}
# if THREAD_SIZE >= PAGE_SIZE
-void __init __weak thread_info_cache_init(void)
+void __init __weak thread_stack_cache_init(void)
{
}
#endif
/* Should be run before the first non-init thread is created */
init_espfix_bsp();
#endif
- thread_info_cache_init();
+ thread_stack_cache_init();
cred_init();
fork_init();
proc_caches_init();
{
struct blacklist_entry *entry;
char fn_name[KSYM_SYMBOL_LEN];
+ unsigned long addr;
if (list_empty(&blacklisted_initcalls))
return false;
- sprint_symbol_no_offset(fn_name, (unsigned long)fn);
+ addr = (unsigned long) dereference_function_descriptor(fn);
+ sprint_symbol_no_offset(fn_name, addr);
list_for_each_entry(entry, &blacklisted_initcalls, next) {
if (!strcmp(fn_name, entry->buf)) {
audit_log_format(ab, " exe=(null)");
}
+struct tty_struct *audit_get_tty(struct task_struct *tsk)
+{
+ struct tty_struct *tty = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsk->sighand->siglock, flags);
+ if (tsk->signal)
+ tty = tty_kref_get(tsk->signal->tty);
+ spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+ return tty;
+}
+
+void audit_put_tty(struct tty_struct *tty)
+{
+ tty_kref_put(tty);
+}
+
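Both helpers are reference-counted and NULL-safe; a hedged caller sketch:

	struct tty_struct *tty;

	tty = audit_get_tty(current);
	audit_log_format(ab, " tty=%s", tty ? tty_name(tty) : "(none)");
	audit_put_tty(tty);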
void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
{
const struct cred *cred;
#include <linux/audit.h>
#include <linux/skbuff.h>
#include <uapi/linux/mqueue.h>
+#include <linux/tty.h>
/* AUDIT_NAMES is the number of slots we reserve in the audit_context
* for saving names from getname(). If we get more names we will allocate
extern void audit_log_d_path_exe(struct audit_buffer *ab,
struct mm_struct *mm);
+extern struct tty_struct *audit_get_tty(struct task_struct *tsk);
+extern void audit_put_tty(struct tty_struct *tty);
+
/* audit watch functions */
#ifdef CONFIG_AUDIT_WATCH
extern void audit_put_watch(struct audit_watch *watch);
#include <asm/unistd.h>
#include <linux/security.h>
#include <linux/list.h>
-#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
if (!audit_enabled)
return;
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
+ if (!ab)
+ return;
+
uid = from_kuid(&init_user_ns, task_uid(current));
oldloginuid = from_kuid(&init_user_ns, koldloginuid);
loginuid = from_kuid(&init_user_ns, kloginuid);
tty = audit_get_tty(current);
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
- if (!ab)
- return;
audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
audit_log_task_context(ab);
audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d",
.name = "bpf",
.mount = bpf_mount,
.kill_sb = kill_litter_super,
- .fs_flags = FS_USERNS_MOUNT,
};
MODULE_ALIAS_FS("bpf");
* are set to NOT_INIT to indicate that they are no longer readable.
*/
-/* types of values stored in eBPF registers */
-enum bpf_reg_type {
- NOT_INIT = 0, /* nothing was written into register */
- UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
- PTR_TO_CTX, /* reg points to bpf_context */
- CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
- PTR_TO_MAP_VALUE, /* reg points to map element value */
- PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
- FRAME_PTR, /* reg == frame_pointer */
- PTR_TO_STACK, /* reg == frame_pointer + imm */
- CONST_IMM, /* constant integer value */
-
- /* PTR_TO_PACKET represents:
- * skb->data
- * skb->data + imm
- * skb->data + (u16) var
- * skb->data + (u16) var + imm
- * if (range > 0) then [ptr, ptr + range - off) is safe to access
- * if (id > 0) means that some 'var' was added
- * if (off > 0) menas that 'imm' was added
- */
- PTR_TO_PACKET,
- PTR_TO_PACKET_END, /* skb->data + headlen */
-};
-
struct reg_state {
enum bpf_reg_type type;
union {
/* check access to 'struct bpf_context' fields */
static int check_ctx_access(struct verifier_env *env, int off, int size,
- enum bpf_access_type t)
+ enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
if (env->prog->aux->ops->is_valid_access &&
- env->prog->aux->ops->is_valid_access(off, size, t)) {
+ env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
/* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size)
env->prog->aux->max_ctx_offset = off + size;
mark_reg_unknown_value(state->regs, value_regno);
} else if (reg->type == PTR_TO_CTX) {
+ enum bpf_reg_type reg_type = UNKNOWN_VALUE;
+
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose("R%d leaks addr into ctx\n", value_regno);
return -EACCES;
}
- err = check_ctx_access(env, off, size, t);
+ err = check_ctx_access(env, off, size, t, &reg_type);
if (!err && t == BPF_READ && value_regno >= 0) {
mark_reg_unknown_value(state->regs, value_regno);
- if (off == offsetof(struct __sk_buff, data) &&
- env->allow_ptr_leaks)
+ if (env->allow_ptr_leaks)
/* note that reg.[id|off|range] == 0 */
- state->regs[value_regno].type = PTR_TO_PACKET;
- else if (off == offsetof(struct __sk_buff, data_end) &&
- env->allow_ptr_leaks)
- state->regs[value_regno].type = PTR_TO_PACKET_END;
+ state->regs[value_regno].type = reg_type;
}
} else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
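With this change the per-program-type is_valid_access() callback reports, through *reg_type, what kind of pointer a context load produces, instead of the verifier hard-coding the __sk_buff data/data_end offsets. A minimal self-contained sketch of the callback shape; the enum values, struct layout, and demo_* names below are simplified stand-ins for illustration, not the kernel definitions:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's bpf types (illustration only). */
enum bpf_access_type { BPF_READ, BPF_WRITE };
enum bpf_reg_type { UNKNOWN_VALUE, PTR_TO_PACKET, PTR_TO_PACKET_END };
struct __sk_buff { unsigned int len, data, data_end; };

static bool demo_is_valid_access(int off, int size, enum bpf_access_type type,
				 enum bpf_reg_type *reg_type)
{
	if (off < 0 || off + size > (int)sizeof(struct __sk_buff))
		return false;

	switch (off) {
	case offsetof(struct __sk_buff, data):
		*reg_type = PTR_TO_PACKET;	/* loads yield a packet pointer */
		break;
	case offsetof(struct __sk_buff, data_end):
		*reg_type = PTR_TO_PACKET_END;	/* loads yield the packet end */
		break;
	}
	return true;
}

int main(void)
{
	enum bpf_reg_type t = UNKNOWN_VALUE;

	demo_is_valid_access(offsetof(struct __sk_buff, data),
			     sizeof(unsigned int), BPF_READ, &t);
	printf("reg_type = %d (PTR_TO_PACKET = %d)\n", t, PTR_TO_PACKET);
	return 0;
}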
static void put_css_set(struct css_set *cset)
{
+ unsigned long flags;
+
/*
* Ensure that the refcount doesn't hit zero while any readers
* can see it. Similar to atomic_dec_and_lock(), but for an
if (atomic_add_unless(&cset->refcount, -1, 1))
return;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irqsave(&css_set_lock, flags);
put_css_set_locked(cset);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irqrestore(&css_set_lock, flags);
}
/*
/* First see if we already have a cgroup group that matches
* the desired set */
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cset = find_existing_css_set(old_cset, cgrp, template);
if (cset)
get_css_set(cset);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
if (cset)
return cset;
* find_existing_css_set() */
memcpy(cset->subsys, template, sizeof(cset->subsys));
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
/* Add reference counts and links from the new css_set. */
list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
struct cgroup *c = link->cgrp;
css_get(css);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return cset;
}
* Release all the links from cset_links to this hierarchy's
* root cgroup
*/
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
list_del(&link->cset_link);
kfree(link);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
if (!list_empty(&root->root_list)) {
list_del(&root->root_list);
ss->root = dst_root;
css->cgroup = dcgrp;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
hash_for_each(css_set_table, i, cset, hlist)
list_move_tail(&cset->e_cset_node[ss->id],
&dcgrp->e_csets[ss->id]);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/* default hierarchy doesn't enable controllers by default */
dst_root->subsys_mask |= 1 << ssid;
if (!buf)
return -ENOMEM;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
if (len >= PATH_MAX)
len = -ERANGE;
{
struct task_struct *p, *g;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
if (use_task_css_set_links)
goto out_unlock;
* entry won't be deleted though the process has exited.
* Do it while holding siglock so that we don't end up
* racing against cgroup_exit().
+ *
+ * Interrupts were already disabled while acquiring
+ * the css_set_lock, so we do not need to disable it
+ * again when acquiring the sighand->siglock here.
*/
- spin_lock_irq(&p->sighand->siglock);
+ spin_lock(&p->sighand->siglock);
if (!(p->flags & PF_EXITING)) {
struct css_set *cset = task_css_set(p);
list_add_tail(&p->cg_list, &cset->tasks);
get_css_set(cset);
}
- spin_unlock_irq(&p->sighand->siglock);
+ spin_unlock(&p->sighand->siglock);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
out_unlock:
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
static void init_cgroup_housekeeping(struct cgroup *cgrp)
* Link the root cgroup in this hierarchy into all the css_set
* objects.
*/
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
hash_for_each(css_set_table, i, cset, hlist) {
link_css_set(&tmp_links, cset, root_cgrp);
if (css_set_populated(cset))
cgroup_update_populated(root_cgrp, true);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
BUG_ON(!list_empty(&root_cgrp->self.children));
BUG_ON(atomic_read(&root->nr_cgrps) != 1);
struct cgroup *cgrp;
mutex_lock(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cgrp = cset_cgroup_from_root(ns->root_cset, root);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
char *ret;
mutex_lock(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
return ret;
char *path = NULL;
mutex_lock(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
path = buf;
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
return path;
}
* the new cgroup. There are no failure cases after here, so this
* is the commit point.
*/
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(cset, &tset->src_csets, mg_node) {
list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
struct css_set *from_cset = task_css_set(task);
put_css_set_locked(from_cset);
}
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/*
* Migration is committed, all target tasks are now on dst_csets.
}
} while_each_subsys_mask();
out_release_tset:
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_splice_init(&tset->dst_csets, &tset->src_csets);
list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
list_del_init(&cset->mg_node);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return ret;
}
lockdep_assert_held(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
cset->mg_src_cgrp = NULL;
cset->mg_dst_cgrp = NULL;
list_del_init(&cset->mg_preload_node);
put_css_set_locked(cset);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
/**
* already PF_EXITING could be freed from underneath us unless we
* take an rcu_read_lock.
*/
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
rcu_read_lock();
task = leader;
do {
break;
} while_each_thread(leader, task);
rcu_read_unlock();
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return cgroup_taskset_migrate(&tset, root);
}
return -EBUSY;
/* look up all src csets */
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
rcu_read_lock();
task = leader;
do {
break;
} while_each_thread(leader, task);
rcu_read_unlock();
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/* prepare dst csets and commit */
ret = cgroup_migrate_prepare_dst(&preloaded_csets);
struct cgroup *cgrp;
struct inode *inode;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
while (!cgroup_is_descendant(dst_cgrp, cgrp))
cgrp = cgroup_parent(cgrp);
if (root == &cgrp_dfl_root)
continue;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
from_cgrp = task_cgroup_from_root(from, root);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
retval = cgroup_attach_task(from_cgrp, tsk, false);
if (retval)
percpu_down_write(&cgroup_threadgroup_rwsem);
/* look up all csses currently attached to @cgrp's subtree */
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
struct cgrp_cset_link *link;
cgroup_migrate_add_src(link->cset, dsct,
&preloaded_csets);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/* NULL dst indicates self on default hierarchy */
ret = cgroup_migrate_prepare_dst(&preloaded_csets);
if (ret)
goto out_finish;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
struct task_struct *task, *ntask;
list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
cgroup_taskset_add(task, &tset);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
ret = cgroup_taskset_migrate(&tset, cgrp->root);
out_finish:
int count = 0;
struct cgrp_cset_link *link;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &cgrp->cset_links, cset_link)
count += atomic_read(&link->cset->refcount);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return count;
}
memset(it, 0, sizeof(*it));
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
it->ss = css->ss;
css_task_iter_advance_css_set(it);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
/**
it->cur_task = NULL;
}
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
if (it->task_pos) {
it->cur_task = list_entry(it->task_pos, struct task_struct,
css_task_iter_advance(it);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return it->cur_task;
}
void css_task_iter_end(struct css_task_iter *it)
{
if (it->cur_cset) {
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_del(&it->iters_node);
put_css_set_locked(it->cur_cset);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
if (it->cur_task)
mutex_lock(&cgroup_mutex);
/* all tasks in @from are being moved, all csets are source */
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &from->cset_links, cset_link)
cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
ret = cgroup_migrate_prepare_dst(&preloaded_csets);
if (ret)
memset(css, 0, sizeof(*css));
css->cgroup = cgrp;
css->ss = ss;
+ css->id = -1;
INIT_LIST_HEAD(&css->sibling);
INIT_LIST_HEAD(&css->children);
css->serial_nr = css_serial_nr_next++;
err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
if (err < 0)
- goto err_free_percpu_ref;
+ goto err_free_css;
css->id = err;
/* @css is ready to be brought online now, make it visible */
err_list_del:
list_del_rcu(&css->sibling);
- cgroup_idr_remove(&ss->css_idr, css->id);
-err_free_percpu_ref:
- percpu_ref_exit(&css->refcnt);
err_free_css:
call_rcu(&css->rcu_head, css_free_rcu_fn);
return ERR_PTR(err);
*/
cgrp->self.flags &= ~CSS_ONLINE;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &cgrp->cset_links, cset_link)
link->cset->dead = true;
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/* initiate massacre of all css's */
for_each_css(css, ssid, cgrp)
goto out;
mutex_lock(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
for_each_root(root) {
struct cgroup_subsys *ss;
retval = 0;
out_unlock:
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
kfree(buf);
out:
if (use_task_css_set_links) {
struct css_set *cset;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cset = task_css_set(current);
if (list_empty(&child->cg_list)) {
get_css_set(cset);
css_set_move_task(child, NULL, cset, false);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
/*
cset = task_css_set(tsk);
if (!list_empty(&tsk->cg_list)) {
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
css_set_move_task(tsk, cset, NULL, false);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
} else {
get_css_set(cset);
}
if (!pathbuf || !agentbuf)
goto out;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
if (!path)
goto out;
return ERR_PTR(-EPERM);
mutex_lock(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cset = task_css_set(current);
get_css_set(cset);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
new_ns = alloc_cgroup_ns();
if (!name_buf)
return -ENOMEM;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
rcu_read_lock();
cset = rcu_dereference(current->cgroups);
list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
c->root->hierarchy_id, name_buf);
}
rcu_read_unlock();
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
kfree(name_buf);
return 0;
}
struct cgroup_subsys_state *css = seq_css(seq);
struct cgrp_cset_link *link;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
struct css_set *cset = link->cset;
struct task_struct *task;
overflow:
seq_puts(seq, " ...\n");
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return 0;
}
.teardown = takedown_cpu,
.cant_stop = true,
},
+#else
+ [CPUHP_BRINGUP_CPU] = { },
#endif
};
return event->state == PERF_EVENT_STATE_DEAD;
}
-static inline int pmu_filter_match(struct perf_event *event)
+static inline int __pmu_filter_match(struct perf_event *event)
{
struct pmu *pmu = event->pmu;
return pmu->filter_match ? pmu->filter_match(event) : 1;
}
+/*
+ * Check whether we should attempt to schedule an event group based on
+ * PMU-specific filtering. An event group can consist of HW and SW events,
+ * potentially with a SW leader, so we must check all the filters, to
+ * determine whether a group is schedulable:
+ */
+static inline int pmu_filter_match(struct perf_event *event)
+{
+ struct perf_event *child;
+
+ if (!__pmu_filter_match(event))
+ return 0;
+
+ list_for_each_entry(child, &event->sibling_list, group_entry) {
+ if (!__pmu_filter_match(child))
+ return 0;
+ }
+
+ return 1;
+}
+
static inline int
event_filter_match(struct perf_event *event)
{
if (event->ctx)
put_ctx(event->ctx);
- if (event->pmu) {
- exclusive_event_destroy(event);
- module_put(event->pmu->module);
- }
+ exclusive_event_destroy(event);
+ module_put(event->pmu->module);
call_rcu(&event->rcu_head, free_event_rcu);
}
prog = event->tp_event->prog;
if (prog) {
event->tp_event->prog = NULL;
- bpf_prog_put(prog);
+ bpf_prog_put_rcu(prog);
}
}
}
#endif
-void __weak arch_release_thread_info(struct thread_info *ti)
+void __weak arch_release_thread_stack(unsigned long *stack)
{
}
-#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
+#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
/*
* Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
* kmemcache based allocator.
*/
# if THREAD_SIZE >= PAGE_SIZE
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
return page ? page_address(page) : NULL;
}
-static inline void free_thread_info(struct thread_info *ti)
+static inline void free_thread_stack(unsigned long *stack)
{
- struct page *page = virt_to_page(ti);
+ struct page *page = virt_to_page(stack);
memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
-(1 << THREAD_SIZE_ORDER));
__free_kmem_pages(page, THREAD_SIZE_ORDER);
}
# else
-static struct kmem_cache *thread_info_cache;
+static struct kmem_cache *thread_stack_cache;
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
- return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
+ return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}
-static void free_thread_info(struct thread_info *ti)
+static void free_thread_stack(unsigned long *stack)
{
- kmem_cache_free(thread_info_cache, ti);
+ kmem_cache_free(thread_stack_cache, stack);
}
-void thread_info_cache_init(void)
+void thread_stack_cache_init(void)
{
- thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+ thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
THREAD_SIZE, 0, NULL);
- BUG_ON(thread_info_cache == NULL);
+ BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
-static void account_kernel_stack(struct thread_info *ti, int account)
+static void account_kernel_stack(unsigned long *stack, int account)
{
- struct zone *zone = page_zone(virt_to_page(ti));
+ struct zone *zone = page_zone(virt_to_page(stack));
mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}
void free_task(struct task_struct *tsk)
{
account_kernel_stack(tsk->stack, -1);
- arch_release_thread_info(tsk->stack);
- free_thread_info(tsk->stack);
+ arch_release_thread_stack(tsk->stack);
+ free_thread_stack(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
put_seccomp_filter(tsk);
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
struct task_struct *tsk;
- struct thread_info *ti;
+ unsigned long *stack;
int err;
if (node == NUMA_NO_NODE)
if (!tsk)
return NULL;
- ti = alloc_thread_info_node(tsk, node);
- if (!ti)
+ stack = alloc_thread_stack_node(tsk, node);
+ if (!stack)
goto free_tsk;
err = arch_dup_task_struct(tsk, orig);
if (err)
- goto free_ti;
+ goto free_stack;
- tsk->stack = ti;
+ tsk->stack = stack;
#ifdef CONFIG_SECCOMP
/*
* We must handle setting up seccomp filters once we're under
tsk->task_frag.page = NULL;
tsk->wake_q.next = NULL;
- account_kernel_stack(ti, 1);
+ account_kernel_stack(stack, 1);
kcov_task_init(tsk);
return tsk;
-free_ti:
- free_thread_info(ti);
+free_stack:
+ free_thread_stack(stack);
free_tsk:
free_task_struct(tsk);
return NULL;
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
- struct page *page;
+ struct page *page, *tail;
struct address_space *mapping;
int err, ro = 0;
* considered here and page lock forces unnecessarily serialization
* From this point on, mapping will be re-verified if necessary and
* page lock will be acquired only if it is unavoidable
- */
+ *
+ * Mapping checks require the head page for any compound page so the
+ * head page and mapping is looked up now. For anonymous pages, it
+ * does not matter if the page splits in the future as the key is
+ * based on the address. For filesystem-backed pages, the tail is
+ * required as the index of the page determines the key. For
+ * base pages, there is no tail page and tail == page.
+ */
+ tail = page;
page = compound_head(page);
mapping = READ_ONCE(page->mapping);
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.inode = inode;
- key->shared.pgoff = basepage_index(page);
+ key->shared.pgoff = basepage_index(tail);
rcu_read_unlock();
}
#include <linux/vmalloc.h>
#include "gcov.h"
-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#if (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
#define GCOV_COUNTERS 10
#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
#define GCOV_COUNTERS 9
domain = data->domain;
if (WARN_ON(domain == NULL))
- return;
+ return -EINVAL;
if (!irq_domain_is_ipi(domain)) {
pr_warn("Trying to destroy a non IPI domain!\n");
void static_key_slow_inc(struct static_key *key)
{
+ int v, v1;
+
STATIC_KEY_CHECK_USE();
- if (atomic_inc_not_zero(&key->enabled))
- return;
+
+ /*
+ * Careful if we get concurrent static_key_slow_inc() calls;
+ * later calls must wait for the first one to _finish_ the
+ * jump_label_update() process. At the same time, however,
+ * the jump_label_update() call below wants to see
+ * static_key_enabled(&key) for jumps to be updated properly.
+ *
+ * So give a special meaning to negative key->enabled: it sends
+ * static_key_slow_inc() down the slow path, and it is non-zero
+ * so it counts as "enabled" in jump_label_update(). Note that
+ * atomic_inc_unless_negative() checks >= 0, so roll our own.
+ */
+ for (v = atomic_read(&key->enabled); v > 0; v = v1) {
+ v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
+ if (likely(v1 == v))
+ return;
+ }
jump_label_lock();
- if (atomic_inc_return(&key->enabled) == 1)
+ if (atomic_read(&key->enabled) == 0) {
+ atomic_set(&key->enabled, -1);
jump_label_update(key);
+ atomic_set(&key->enabled, 1);
+ } else {
+ atomic_inc(&key->enabled);
+ }
jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
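The comment above gives a negative key->enabled a sentinel meaning; the handshake is easier to see in isolation. A minimal userspace analogue using C11 atomics, with a pthread mutex standing in for jump_label_lock() (an illustrative model, not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int enabled;	/* 0: off, -1: update in flight, >0: on */
static pthread_mutex_t patch_lock = PTHREAD_MUTEX_INITIALIZER; /* models jump_label_lock() */

static void slow_inc(void)
{
	int v = atomic_load(&enabled);

	/* Fast path: bump the count only while the key is fully enabled. */
	while (v > 0) {
		if (atomic_compare_exchange_weak(&enabled, &v, v + 1))
			return;
	}

	pthread_mutex_lock(&patch_lock);
	if (atomic_load(&enabled) == 0) {
		atomic_store(&enabled, -1);	/* non-zero: counts as enabled, keeps fast path away */
		/* ... code patching (jump_label_update()) would run here ... */
		atomic_store(&enabled, 1);
	} else {
		atomic_fetch_add(&enabled, 1);	/* later callers waited on the lock */
	}
	pthread_mutex_unlock(&patch_lock);
}

int main(void)
{
	slow_inc();
	slow_inc();
	printf("enabled = %d\n", atomic_load(&enabled));	/* prints 2 */
	return 0;
}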
static void __static_key_slow_dec(struct static_key *key,
unsigned long rate_limit, struct delayed_work *work)
{
+ /*
+ * The negative count check is valid even when a negative
+ * key->enabled is in use by static_key_slow_inc(); a
+ * __static_key_slow_dec() before the first static_key_slow_inc()
+ * returns is unbalanced, because all other static_key_slow_inc()
+ * instances block while the update is in progress.
+ */
if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
WARN(atomic_read(&key->enabled) < 0,
"jump label: negative count!\n");
static int __init kcov_init(void)
{
- if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) {
+ /*
+ * The kcov debugfs file won't ever get removed and thus,
+ * there is no need to protect it against removal races. The
+ * use of debugfs_create_file_unsafe() is actually safe here.
+ */
+ if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
pr_err("failed to create kcov in debugfs\n");
return -ENOMEM;
}
}
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
- struct thread_info *ti)
+ struct task_struct *task)
{
SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
/* Mark the current thread as blocked on the lock: */
- ti->task->blocked_on = waiter;
+ task->blocked_on = waiter;
}
void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
- struct thread_info *ti)
+ struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
- ti->task->blocked_on = NULL;
+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+ task->blocked_on = NULL;
list_del_init(&waiter->list);
waiter->task = NULL;
extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
extern void debug_mutex_add_waiter(struct mutex *lock,
struct mutex_waiter *waiter,
- struct thread_info *ti);
+ struct task_struct *task);
extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
- struct thread_info *ti);
+ struct task_struct *task);
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key);
if (!hold_ctx)
return 0;
- if (unlikely(ctx == hold_ctx))
- return -EALREADY;
-
if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
(ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
unsigned long flags;
int ret;
+ if (use_ww_ctx) {
+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+ if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
+ return -EALREADY;
+ }
+
preempt_disable();
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
goto skip_wait;
debug_mutex_lock_common(lock, &waiter);
- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+ debug_mutex_add_waiter(lock, &waiter, task);
/* add waiting tasks to the end of the waitqueue (FIFO): */
list_add_tail(&waiter.list, &lock->wait_list);
}
__set_task_state(task, TASK_RUNNING);
- mutex_remove_waiter(lock, &waiter, current_thread_info());
+ mutex_remove_waiter(lock, &waiter, task);
/* set it to 0 if there are no waiters left: */
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);
return 0;
err:
- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+ mutex_remove_waiter(lock, &waiter, task);
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
mutex_release(&lock->dep_map, 1, ip);
do { spin_lock(lock); (void)(flags); } while (0)
#define spin_unlock_mutex(lock, flags) \
do { spin_unlock(lock); (void)(flags); } while (0)
-#define mutex_remove_waiter(lock, waiter, ti) \
+#define mutex_remove_waiter(lock, waiter, task) \
__list_del((waiter)->list.prev, (waiter)->list.next)
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
#define queued_spin_lock_slowpath native_queued_spin_lock_slowpath
#endif
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ * (1) clear_pending_set_locked(): *,1,0 -> *,0,1
+ *
+ * (2) set_locked(): t,0,0 -> t,0,1 ; t != 0
+ * atomic_cmpxchg_relaxed(): t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourselves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more interesting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient, however that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+ u32 val;
+
+ for (;;) {
+ val = atomic_read(&lock->val);
+
+ if (!val) /* not locked, we're done */
+ goto done;
+
+ if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+ break;
+
+ /* not locked, but pending, wait until we observe the lock */
+ cpu_relax();
+ }
+
+ /* any unlock is good */
+ while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+ cpu_relax();
+
+done:
+ smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
#endif /* _GEN_PV_LOCK_SLOWPATH */
/**
if (!error && !oom_killer_disable())
error = -EBUSY;
+ /*
+ * There is a hard to fix race between oom_reaper kernel thread
+ * and oom_killer_disable. oom_reaper calls exit_oom_victim
+ * before the victim reaches exit_mm so try to freeze all the tasks
+ * again and catch such a left over task.
+ */
+ if (!error) {
+ pr_info("Double checking all user space processes after OOM killer disable... ");
+ error = try_to_freeze_tasks(true);
+ pr_cont("\n");
+ }
+
if (error)
thaw_processes();
return error;
kref_put(&chan->kref, relay_destroy_channel);
mutex_unlock(&relay_channels_mutex);
+ kfree(chan);
return NULL;
}
EXPORT_SYMBOL_GPL(relay_open);
for (;;) {
/* Any allowed, online CPU? */
for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
- if (!cpu_active(dest_cpu))
+ if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
+ continue;
+ if (!cpu_online(dest_cpu))
continue;
goto out;
}
#endif
#endif
+#ifdef CONFIG_SCHEDSTATS
+
DEFINE_STATIC_KEY_FALSE(sched_schedstats);
+static bool __initdata __sched_schedstats = false;
-#ifdef CONFIG_SCHEDSTATS
static void set_schedstats(bool enabled)
{
if (enabled)
if (!str)
goto out;
+ /*
+ * This code is called before jump labels have been set up, so we can't
+ * change the static branch directly just yet. Instead set a temporary
+ * variable so init_schedstats() can do it later.
+ */
if (!strcmp(str, "enable")) {
- set_schedstats(true);
+ __sched_schedstats = true;
ret = 1;
} else if (!strcmp(str, "disable")) {
- set_schedstats(false);
+ __sched_schedstats = false;
ret = 1;
}
out:
}
__setup("schedstats=", setup_schedstats);
+static void __init init_schedstats(void)
+{
+ set_schedstats(__sched_schedstats);
+}
+
#ifdef CONFIG_PROC_SYSCTL
int sysctl_schedstats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
set_schedstats(state);
return err;
}
-#endif
-#endif
+#endif /* CONFIG_PROC_SYSCTL */
+#else /* !CONFIG_SCHEDSTATS */
+static inline void init_schedstats(void) {}
+#endif /* CONFIG_SCHEDSTATS */
/*
* fork()/clone()-time setup:
*/
set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
#endif
- /* Post initialize new task's util average when its cfs_rq is set */
+ rq = __task_rq_lock(p, &rf);
post_init_entity_util_avg(&p->se);
- rq = __task_rq_lock(p, &rf);
activate_task(rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
trace_sched_wakeup_new(p);
static inline void schedule_debug(struct task_struct *prev)
{
#ifdef CONFIG_SCHED_STACK_END_CHECK
- BUG_ON(task_stack_end_corrupted(prev));
+ if (task_stack_end_corrupted(prev))
+ panic("corrupted stack end detected inside scheduler\n");
#endif
if (unlikely(in_atomic_preempt_off())) {
/*
* reset the NMI-timeout, listing all files on a slow
* console might take a lot of time:
+ * Also, reset softlockup watchdogs on all CPUs, because
+ * another CPU might be blocked waiting for us to process
+ * an IPI.
*/
touch_nmi_watchdog();
+ touch_all_softlockup_watchdogs();
if (!state_filter || (p->state & state_filter))
sched_show_task(p);
}
- touch_all_softlockup_watchdogs();
-
#ifdef CONFIG_SCHED_DEBUG
if (!state_filter)
sysrq_sched_debug_show();
/*
* Since this CPU is going 'away' for a while, fold any nr_active delta
* we might have. Assumes we're called after migrate_tasks() so that the
- * nr_active count is stable.
+ * nr_active count is stable. We need to take the teardown thread which
+ * is calling this into account, so we hand in adjust = 1 to the load
+ * calculation.
*
* Also see the comment "Global load-average calculations".
*/
static void calc_load_migrate(struct rq *rq)
{
- long delta = calc_load_fold_active(rq);
+ long delta = calc_load_fold_active(rq, 1);
if (delta)
atomic_long_add(delta, &calc_load_tasks);
}
#endif
init_sched_fair_class();
+ init_schedstats();
+
scheduler_running = 1;
}
SPLIT_NS(p->se.vruntime),
(long long)(p->nvcsw + p->nivcsw),
p->prio);
-#ifdef CONFIG_SCHEDSTATS
- if (schedstat_enabled()) {
- SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
- SPLIT_NS(p->se.statistics.wait_sum),
- SPLIT_NS(p->se.sum_exec_runtime),
- SPLIT_NS(p->se.statistics.sum_sleep_runtime));
- }
-#else
+
SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
- 0LL, 0L,
+ SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)),
SPLIT_NS(p->se.sum_exec_runtime),
- 0LL, 0L);
-#endif
+ SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime)));
+
#ifdef CONFIG_NUMA_BALANCING
SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
}
}
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
#else
void init_entity_runnable_average(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
- long tg_weight;
+ long tg_weight, load, shares;
/*
- * Use this CPU's real-time load instead of the last load contribution
- * as the updating of the contribution is delayed, and we will use the
- * the real-time load to calc the share. See update_tg_load_avg().
+ * This really should be: cfs_rq->avg.load_avg, but instead we use
+ * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+ * the shares for small weight interactive tasks.
*/
- tg_weight = atomic_long_read(&tg->load_avg);
- tg_weight -= cfs_rq->tg_load_avg_contrib;
- tg_weight += cfs_rq->load.weight;
+ load = scale_load_down(cfs_rq->load.weight);
- return tg_weight;
-}
-
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
- long tg_weight, load, shares;
+ tg_weight = atomic_long_read(&tg->load_avg);
- tg_weight = calc_tg_weight(tg, cfs_rq);
- load = cfs_rq->load.weight;
+ /* Ensure tg_weight >= load */
+ tg_weight -= cfs_rq->tg_load_avg_contrib;
+ tg_weight += load;
shares = (tg->shares * load);
if (tg_weight)
return tg->shares;
}
# endif /* CONFIG_SMP */
+
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
unsigned long weight)
{
}
}
+/*
+ * Unsigned subtract and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define sub_positive(_ptr, _val) do { \
+ typeof(_ptr) ptr = (_ptr); \
+ typeof(*ptr) val = (_val); \
+ typeof(*ptr) res, var = READ_ONCE(*ptr); \
+ res = var - val; \
+ if (res > var) \
+ res = 0; \
+ WRITE_ONCE(*ptr, res); \
+} while (0)
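The same clamp is easy to demonstrate outside the kernel. A plain C model follows; the READ_ONCE/WRITE_ONCE accessors of the real macro are elided, so this version shows only the underflow clamp, not the lockless-reader guarantee:

#include <stdio.h>

/* Userspace model of sub_positive(): unsigned subtract clamped at zero. */
static unsigned long sub_positive_demo(unsigned long *ptr, unsigned long val)
{
	unsigned long var = *ptr;
	unsigned long res = var - val;

	if (res > var)		/* unsigned wrap: result would have been negative */
		res = 0;
	*ptr = res;
	return res;
}

int main(void)
{
	unsigned long avg = 100;

	printf("%lu\n", sub_positive_demo(&avg, 40));	/* 60 */
	printf("%lu\n", sub_positive_demo(&avg, 1000));	/* clamped to 0, not a huge value */
	return 0;
}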
+
/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
if (atomic_long_read(&cfs_rq->removed_load_avg)) {
s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
- sa->load_avg = max_t(long, sa->load_avg - r, 0);
- sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+ sub_positive(&sa->load_avg, r);
+ sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
removed_load = 1;
}
if (atomic_long_read(&cfs_rq->removed_util_avg)) {
long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
- sa->util_avg = max_t(long, sa->util_avg - r, 0);
- sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
+ sub_positive(&sa->util_avg, r);
+ sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
removed_util = 1;
}
&se->avg, se->on_rq * scale_load_down(se->load.weight),
cfs_rq->curr == se, NULL);
- cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
- cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
- cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
- cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+ sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
+ sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
+ sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
+ sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
cfs_rq_util_change(cfs_rq);
}
trace_sched_stat_iowait_enabled() ||
trace_sched_stat_blocked_enabled() ||
trace_sched_stat_runtime_enabled()) {
- pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, "
+ printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
"stat_blocked and stat_runtime require the "
"kernel parameter schedstats=enabled or "
"kernel.sched_schedstats=1\n");
if (!cfs_bandwidth_used())
return;
+ /* Synchronize hierarchical throttle counter: */
+ if (unlikely(!cfs_rq->throttle_uptodate)) {
+ struct rq *rq = rq_of(cfs_rq);
+ struct cfs_rq *pcfs_rq;
+ struct task_group *tg;
+
+ cfs_rq->throttle_uptodate = 1;
+
+ /* Get closest up-to-date node, because leaves go first: */
+ for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
+ pcfs_rq = tg->cfs_rq[cpu_of(rq)];
+ if (pcfs_rq->throttle_uptodate)
+ break;
+ }
+ if (tg) {
+ cfs_rq->throttle_count = pcfs_rq->throttle_count;
+ cfs_rq->throttled_clock_task = rq_clock_task(rq);
+ }
+ }
+
/* an active group must be handled by the update_curr()->put() path */
if (!cfs_rq->runtime_enabled || cfs_rq->curr)
return;
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight) {
+ /* Avoid re-evaluating load for this entity: */
+ se = parent_entity(se);
/*
* Bias pick_next to pick a task from this cfs_rq, as
* p is sleeping when it is within its sched_slice.
*/
- if (task_sleep && parent_entity(se))
- set_next_buddy(parent_entity(se));
-
- /* avoid re-evaluating load for this entity */
- se = parent_entity(se);
+ if (task_sleep && se && !throttled_hierarchy(cfs_rq))
+ set_next_buddy(se);
break;
}
flags |= DEQUEUE_SLEEP;
return wl;
for_each_sched_entity(se) {
- long w, W;
+ struct cfs_rq *cfs_rq = se->my_q;
+ long W, w = cfs_rq_load_avg(cfs_rq);
- tg = se->my_q->tg;
+ tg = cfs_rq->tg;
/*
* W = @wg + \Sum rw_j
*/
- W = wg + calc_tg_weight(tg, se->my_q);
+ W = wg + atomic_long_read(&tg->load_avg);
+
+ /* Ensure \Sum rw_j >= rw_i */
+ W -= cfs_rq->tg_load_avg_contrib;
+ W += w;
/*
* w = rw_i + @wl
*/
- w = cfs_rq_load_avg(se->my_q) + wl;
+ w += wl;
/*
* wl = S * s'_i; see (2)
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
- struct cfs_rq *cfs_rq;
struct sched_entity *se;
+ struct cfs_rq *cfs_rq;
+ struct rq *rq;
int i;
tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
init_cfs_bandwidth(tg_cfs_bandwidth(tg));
for_each_possible_cpu(i) {
+ rq = cpu_rq(i);
+
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
GFP_KERNEL, cpu_to_node(i));
if (!cfs_rq)
init_cfs_rq(cfs_rq);
init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
init_entity_runnable_average(se);
+
+ raw_spin_lock_irq(&rq->lock);
post_init_entity_util_avg(se);
+ raw_spin_unlock_irq(&rq->lock);
}
return 1;
*/
static void cpuidle_idle_call(void)
{
- struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+ struct cpuidle_device *dev = cpuidle_get_device();
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int next_state, entered_state;
loads[2] = (avenrun[2] + offset) << shift;
}
-long calc_load_fold_active(struct rq *this_rq)
+long calc_load_fold_active(struct rq *this_rq, long adjust)
{
long nr_active, delta = 0;
- nr_active = this_rq->nr_running;
+ nr_active = this_rq->nr_running - adjust;
nr_active += (long)this_rq->nr_uninterruptible;
if (nr_active != this_rq->calc_load_active) {
* We're going into NOHZ mode, if there's any pending delta, fold it
* into the pending idle delta.
*/
- delta = calc_load_fold_active(this_rq);
+ delta = calc_load_fold_active(this_rq, 0);
if (delta) {
int idx = calc_load_write_idx();
if (time_before(jiffies, this_rq->calc_load_update))
return;
- delta = calc_load_fold_active(this_rq);
+ delta = calc_load_fold_active(this_rq, 0);
if (delta)
atomic_long_add(delta, &calc_load_tasks);
extern atomic_long_t calc_load_tasks;
extern void calc_global_load_tick(struct rq *this_rq);
-extern long calc_load_fold_active(struct rq *this_rq);
+extern long calc_load_fold_active(struct rq *this_rq, long adjust);
#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
u64 throttled_clock, throttled_clock_task;
u64 throttled_clock_task_time;
- int throttled, throttle_count;
+ int throttled, throttle_count, throttle_uptodate;
struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
# define schedstat_inc(rq, field) do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
# define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
# define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0)
+# define schedstat_val(rq, field) ((schedstat_enabled()) ? (rq)->field : 0)
+
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
# define schedstat_inc(rq, field) do { } while (0)
# define schedstat_add(rq, field, amt) do { } while (0)
# define schedstat_set(var, val) do { } while (0)
+# define schedstat_val(rq, field) 0
#endif
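The schedstat_val() pattern is what lets the proc output change above drop its #ifdef: a statistics field simply reads as 0 while collection is off. A small standalone model (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool schedstat_on;	/* stands in for schedstat_enabled() */

struct stats { unsigned long long wait_sum; };

#define schedstat_val_demo(s, field) (schedstat_on ? (s)->field : 0ULL)

int main(void)
{
	struct stats st = { .wait_sum = 1234 };

	printf("%llu\n", schedstat_val_demo(&st, wait_sum));	/* 0: disabled */
	schedstat_on = true;
	printf("%llu\n", schedstat_val_demo(&st, wait_sum));	/* 1234 */
	return 0;
}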
#ifdef CONFIG_SCHED_INFO
{
debug_object_free(timer, &hrtimer_debug_descr);
}
+EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
timer->it.cpu.expires = 0;
sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
&itp->it_value);
+ return;
} else {
cpu_timer_sample_group(timer->it_clock, p, &now);
unlock_task_sighand(p, &flags);
if (unlikely(index >= array->map.max_entries))
return -E2BIG;
- file = (struct file *)array->ptrs[index];
+ file = READ_ONCE(array->ptrs[index]);
if (unlikely(!file))
return -ENOENT;
event->pmu->count)
return -EINVAL;
+ if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
+ event->attr.type != PERF_TYPE_RAW))
+ return -EINVAL;
+
/*
* we don't know if the function is run successfully by the
* return value. It can be judged in other places, such as
if (unlikely(index >= array->map.max_entries))
return -E2BIG;
- file = (struct file *)array->ptrs[index];
+ file = READ_ONCE(array->ptrs[index]);
if (unlikely(!file))
return -ENOENT;
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
-static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
+static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+ enum bpf_reg_type *reg_type)
{
/* check bounds */
if (off < 0 || off >= sizeof(struct pt_regs))
}
}
-static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type)
+static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+ enum bpf_reg_type *reg_type)
{
if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
return false;
static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
{
struct trace_bprintk_fmt *pos;
+
+ if (!fmt)
+ return ERR_PTR(-EINVAL);
+
list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
if (!strcmp(pos->fmt, fmt))
return pos;
for (iter = start; iter < end; iter++) {
struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
if (tb_fmt) {
- *iter = tb_fmt->fmt;
+ if (!IS_ERR(tb_fmt))
+ *iter = tb_fmt->fmt;
continue;
}
if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
return;
- /* is @cpu the only online CPU? */
cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
- if (cpumask_weight(&cpumask) != 1)
- return;
/* as we're called from CPU_ONLINE, the following shouldn't fail */
for_each_pool_worker(worker, pool)
- WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
- pool->attrs->cpumask) < 0);
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
}
/*
If unsure, say N.
+config TEST_UUID
+ tristate "Test functions located in the uuid module at runtime"
+
config TEST_RHASHTABLE
tristate "Perform selftest on resizable hash table"
default n
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+obj-$(CONFIG_TEST_UUID) += test_uuid.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
--- /dev/null
+/*
+ * Test cases for lib/uuid.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uuid.h>
+
+struct test_uuid_data {
+ const char *uuid;
+ uuid_le le;
+ uuid_be be;
+};
+
+static const struct test_uuid_data test_uuid_test_data[] = {
+ {
+ .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
+ .le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+ .be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+ },
+ {
+ .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
+ .le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+ .be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+ },
+ {
+ .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
+ .le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+ .be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+ },
+};
+
+static const char * const test_uuid_wrong_data[] = {
+ "c33f4995-3701-450e-9fbf206a2e98e576 ", /* no hyphen(s) */
+ "64b4371c-77c1-48f9-8221-29f054XX023b", /* invalid character(s) */
+ "0cb4ddff-a545-4401-9d06-688af53e", /* not enough data */
+};
+
+static unsigned total_tests __initdata;
+static unsigned failed_tests __initdata;
+
+static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
+ const char *data, const char *actual)
+{
+ pr_err("%s test #%u %s %s data: '%s'\n",
+ prefix,
+ total_tests,
+ wrong ? "passed on wrong" : "failed on",
+ be ? "BE" : "LE",
+ data);
+ if (actual && *actual)
+ pr_err("%s test #%u actual data: '%s'\n",
+ prefix,
+ total_tests,
+ actual);
+ failed_tests++;
+}
+
+static void __init test_uuid_test(const struct test_uuid_data *data)
+{
+ uuid_le le;
+ uuid_be be;
+ char buf[48];
+
+ /* LE */
+ total_tests++;
+ if (uuid_le_to_bin(data->uuid, &le))
+ test_uuid_failed("conversion", false, false, data->uuid, NULL);
+
+ total_tests++;
+ if (uuid_le_cmp(data->le, le)) {
+ sprintf(buf, "%pUl", &le);
+ test_uuid_failed("cmp", false, false, data->uuid, buf);
+ }
+
+ /* BE */
+ total_tests++;
+ if (uuid_be_to_bin(data->uuid, &be))
+ test_uuid_failed("conversion", false, true, data->uuid, NULL);
+
+ total_tests++;
+ if (uuid_be_cmp(data->be, be)) {
+ sprintf(buf, "%pUb", &be);
+ test_uuid_failed("cmp", false, true, data->uuid, buf);
+ }
+}
+
+static void __init test_uuid_wrong(const char *data)
+{
+ uuid_le le;
+ uuid_be be;
+
+ /* LE */
+ total_tests++;
+ if (!uuid_le_to_bin(data, &le))
+ test_uuid_failed("negative", true, false, data, NULL);
+
+ /* BE */
+ total_tests++;
+ if (!uuid_be_to_bin(data, &be))
+ test_uuid_failed("negative", true, true, data, NULL);
+}
+
+static int __init test_uuid_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++)
+ test_uuid_test(&test_uuid_test_data[i]);
+
+ for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++)
+ test_uuid_wrong(test_uuid_wrong_data[i]);
+
+ if (failed_tests == 0)
+ pr_info("all %u tests passed\n", total_tests);
+ else
+ pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
+
+ return failed_tests ? -EINVAL : 0;
+}
+module_init(test_uuid_init);
+
+static void __exit test_uuid_exit(void)
+{
+ /* do nothing */
+}
+module_exit(test_uuid_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_LICENSE("Dual BSD/GPL");
return -EINVAL;
for (i = 0; i < 16; i++) {
- int hi = hex_to_bin(uuid[si[i]] + 0);
- int lo = hex_to_bin(uuid[si[i]] + 1);
+ int hi = hex_to_bin(uuid[si[i] + 0]);
+ int lo = hex_to_bin(uuid[si[i] + 1]);
b[ei[i]] = (hi << 4) | lo;
}
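The bug was adding 0/1 to the character value (uuid[si[i]] + 1) instead of to the string index (uuid[si[i] + 1]). A standalone model of the corrected parsing, with hex_to_bin() reimplemented for the demo:

#include <stdio.h>

/* Minimal stand-in for the kernel's hex_to_bin(). */
static int hex_to_bin(char ch)
{
	if (ch >= '0' && ch <= '9') return ch - '0';
	if (ch >= 'a' && ch <= 'f') return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F') return ch - 'A' + 10;
	return -1;
}

int main(void)
{
	const char *uuid = "c33f4995-3701-450e-9fbf-206a2e98e576";
	/* si[] holds the string offset of each byte's first hex digit */
	static const int si[16] = { 0, 2, 4, 6, 9, 11, 14, 16,
				    19, 21, 24, 26, 28, 30, 32, 34 };
	unsigned char b[16];

	for (int i = 0; i < 16; i++) {
		int hi = hex_to_bin(uuid[si[i] + 0]);	/* index, not value, gets the + 0/1 */
		int lo = hex_to_bin(uuid[si[i] + 1]);

		b[i] = (hi << 4) | lo;
	}
	printf("first byte: 0x%02x\n", b[0]);	/* 0xc3 */
	return 0;
}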
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
+ if (!isolated)
+ break;
+
total_isolated += isolated;
+ cc->nr_freepages += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
page++;
}
-
- /* If a page was split, advance to the end of it */
- if (isolated) {
- cc->nr_freepages += isolated;
- if (!strict &&
- cc->nr_migratepages <= cc->nr_freepages) {
- blockpfn += isolated;
- break;
- }
-
- blockpfn += isolated - 1;
- cursor += isolated - 1;
- continue;
+ if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
+ blockpfn += isolated;
+ break;
}
+ /* Advance to the end of split page */
+ blockpfn += isolated - 1;
+ cursor += isolated - 1;
+ continue;
isolate_fail:
if (strict)
}
+ if (locked)
+ spin_unlock_irqrestore(&cc->zone->lock, flags);
+
/*
* There is a tiny chance that we have read bogus compound_order(),
* so be careful to not go outside of the pageblock.
if (strict && blockpfn < end_pfn)
total_isolated = 0;
- if (locked)
- spin_unlock_irqrestore(&cc->zone->lock, flags);
-
/* Update the pageblock-skip if the whole pageblock was scanned */
if (blockpfn == end_pfn)
update_pageblock_skip(cc, valid_page, total_isolated, false);
block_end_pfn = block_start_pfn,
block_start_pfn -= pageblock_nr_pages,
isolate_start_pfn = block_start_pfn) {
-
/*
* This can iterate a massively long zone without finding any
* suitable migration targets, so periodically check if we need
continue;
/* Found a block suitable for isolating free pages from. */
- isolate_freepages_block(cc, &isolate_start_pfn,
- block_end_pfn, freelist, false);
+ isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+ freelist, false);
/*
- * If we isolated enough freepages, or aborted due to async
- * compaction being contended, terminate the loop.
- * Remember where the free scanner should restart next time,
- * which is where isolate_freepages_block() left off.
- * But if it scanned the whole pageblock, isolate_start_pfn
- * now points at block_end_pfn, which is the start of the next
- * pageblock.
- * In that case we will however want to restart at the start
- * of the previous pageblock.
+ * If we isolated enough freepages, or aborted due to lock
+ * contention, terminate.
*/
if ((cc->nr_freepages >= cc->nr_migratepages)
|| cc->contended) {
- if (isolate_start_pfn >= block_end_pfn)
+ if (isolate_start_pfn >= block_end_pfn) {
+ /*
+ * Restart at previous pageblock if more
+ * freepages can be isolated next time.
+ */
isolate_start_pfn =
block_start_pfn - pageblock_nr_pages;
+ }
break;
- } else {
+ } else if (isolate_start_pfn < block_end_pfn) {
/*
- * isolate_freepages_block() should not terminate
- * prematurely unless contended, or isolated enough
+ * If isolation failed early, do not continue
+ * needlessly.
*/
- VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+ break;
}
}
*/
start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
end_index = (endbyte >> PAGE_SHIFT);
+ if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
+ /* First page is tricky as 0 - 1 = -1, but pgoff_t
+ * is unsigned, so the end_index >= start_index
+ * check below would be true and we'll discard the whole
+ * file cache which is not what was asked.
+ */
+ if (end_index == 0)
+ break;
+
+ end_index--;
+ }
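Concrete numbers make the guard clearer. A userspace model of the index math for a range that ends inside the first page (a 4 KiB page is assumed):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long offset = 0, endbyte = 100;	/* range lies inside page 0 */
	unsigned long start_index = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long end_index   = endbyte >> PAGE_SHIFT;

	if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {	/* partial last page */
		if (end_index == 0) {
			puts("nothing to invalidate: only a partial first page");
			return 0;	/* without this, 0 - 1 wraps to ULONG_MAX */
		}
		end_index--;		/* exclude the partial page */
	}
	printf("invalidate pages %lu..%lu\n", start_index, end_index);
	return 0;
}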
if (end_index >= start_index) {
unsigned long count = invalidate_mapping_pages(mapping,
if (file->f_ra.mmap_miss > 0)
file->f_ra.mmap_miss--;
addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
- do_set_pte(vma, addr, page, pte, false, false, true);
+ do_set_pte(vma, addr, page, pte, false, false);
unlock_page(page);
goto next;
unlock:
if (next - addr != HPAGE_PMD_SIZE) {
get_page(page);
spin_unlock(ptl);
- if (split_huge_page(page)) {
- put_page(page);
- unlock_page(page);
- goto out_unlocked;
- }
+ split_huge_page(page);
put_page(page);
unlock_page(page);
- ret = 1;
goto out_unlocked;
}
}
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze)
+ unsigned long address, bool freeze, struct page *page)
{
spinlock_t *ptl;
struct mm_struct *mm = vma->vm_mm;
mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
ptl = pmd_lock(mm, pmd);
+
+ /*
+ * If caller asks to setup a migration entries, we need a page to check
+ * pmd against. Otherwise we can end up replacing wrong page.
+ */
+ VM_BUG_ON(freeze && !page);
+ if (page && page != pmd_page(*pmd))
+ goto out;
+
if (pmd_trans_huge(*pmd)) {
- struct page *page = pmd_page(*pmd);
+ page = pmd_page(*pmd);
if (PageMlocked(page))
clear_page_mlock(page);
} else if (!pmd_devmap(*pmd))
return;
pmd = pmd_offset(pud, address);
- if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)))
- return;
- /*
- * If caller asks to setup a migration entries, we need a page to check
- * pmd against. Otherwise we can end up replacing wrong page.
- */
- VM_BUG_ON(freeze && !page);
- if (page && page != pmd_page(*pmd))
- return;
-
- /*
- * Caller holds the mmap_sem write mode or the anon_vma lock,
- * so a huge pmd cannot materialize from under us (khugepaged
- * holds both the mmap_sem write mode and the anon_vma lock
- * write mode).
- */
- __split_huge_pmd(vma, pmd, address, freeze);
+ __split_huge_pmd(vma, pmd, address, freeze, page);
}
void vma_adjust_trans_huge(struct vm_area_struct *vma,
* Only the process that called mmap() has reserves for
* private mappings.
*/
- if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
- return true;
+ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+ /*
+ * Like the shared case above, a hole punch or truncate
+ * could have been performed on the private mapping.
+ * Examine the value of chg to determine if reserves
+ * actually exist or were previously consumed.
+ * Very Subtle - The value of chg comes from a previous
+ * call to vma_needs_reserves(). The reserve map for
+ * private mappings has different (opposite) semantics
+ * than that of shared mappings. vma_needs_reserves()
+ * has already taken this difference in semantics into
+ * account. Therefore, the meaning of chg is the same
+ * as in the shared case above. Code could easily be
+ * combined, but keeping it separate draws attention to
+ * subtle differences.
+ */
+ if (chg)
+ return false;
+ else
+ return true;
+ }
return false;
}
int nr_pages = 1 << order;
struct page *p = page + 1;
+ atomic_set(compound_mapcount_ptr(page), 0);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
clear_compound_head(p);
set_page_refcounted(p);
if (vma->vm_flags & VM_MAYSHARE)
return ret;
+ else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
+ /*
+ * In most cases, reserves always exist for private mappings.
+ * However, a file associated with mapping could have been
+ * hole punched or truncated after reserves were consumed.
+ * A subsequent fault on such a range will not use reserves.
+ * Subtle - The reserve map for private mappings has the
+ * opposite meaning than that of shared mappings. If NO
+ * entry is in the reserve map, it means a reservation exists.
+ * If an entry exists in the reserve map, it means the
+ * reservation has already been consumed. As a result, the
+ * return value of this routine is the opposite of the
+ * value returned from reserve map manipulation routines above.
+ */
+ if (ret)
+ return 0;
+ else
+ return 1;
+ }
else
return ret < 0 ? ret : 0;
}
/* If no-one else is actually using this page, avoid the copy
* and just make the page writable */
if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
- page_move_anon_rmap(old_page, vma, address);
+ page_move_anon_rmap(old_page, vma);
set_huge_ptep_writable(vma, address, ptep);
return 0;
}
if (saddr) {
spte = huge_pte_offset(svma->vm_mm, saddr);
if (spte) {
- mm_inc_nr_pmds(mm);
get_page(virt_to_page(spte));
break;
}
if (pud_none(*pud)) {
pud_populate(mm, pud,
(pmd_t *)((unsigned long)spte & PAGE_MASK));
+ mm_inc_nr_pmds(mm);
} else {
put_page(virt_to_page(spte));
- mm_inc_nr_pmds(mm);
}
spin_unlock(ptl);
out:
*/
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
- __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
+ __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
+ __GFP_ATOMIC)
/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
kasan_kmalloc(cache, object, cache->object_size, flags);
}
-void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
+static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
unsigned long size = cache->object_size;
unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
kasan_kmalloc(page->slab_cache, object, size, flags);
}
-void kasan_kfree(void *ptr)
+void kasan_poison_kfree(void *ptr)
{
struct page *page;
kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
KASAN_FREE_PAGE);
else
- kasan_slab_free(page->slab_cache, ptr);
+ kasan_poison_slab_free(page->slab_cache, ptr);
}
void kasan_kfree_large(const void *ptr)
static int __init kasan_memhotplug_init(void)
{
- pr_err("WARNING: KASAN doesn't support memory hot-add\n");
- pr_err("Memory hot-add will be disabled\n");
+ pr_info("WARNING: KASAN doesn't support memory hot-add\n");
+ pr_info("Memory hot-add will be disabled\n");
hotplug_memory_notifier(kasan_mem_notifier, 0);
struct qlist_head *to,
struct kmem_cache *cache)
{
- struct qlist_node *prev = NULL, *curr;
+ struct qlist_node *curr;
if (unlikely(qlist_empty(from)))
return;
curr = from->head;
+ qlist_init(from);
while (curr) {
- struct qlist_node *qlink = curr;
- struct kmem_cache *obj_cache = qlink_to_cache(qlink);
-
- if (obj_cache == cache) {
- if (unlikely(from->head == qlink)) {
- from->head = curr->next;
- prev = curr;
- } else
- prev->next = curr->next;
- if (unlikely(from->tail == qlink))
- from->tail = curr->next;
- from->bytes -= cache->size;
- qlist_put(to, qlink, cache->size);
- } else {
- prev = curr;
- }
- curr = curr->next;
+ struct qlist_node *next = curr->next;
+ struct kmem_cache *obj_cache = qlink_to_cache(curr);
+
+ if (obj_cache == cache)
+ qlist_put(to, curr, obj_cache->size);
+ else
+ qlist_put(from, curr, obj_cache->size);
+
+ curr = next;
}
}
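The rewrite replaces in-place unlinking, with its prev/head/tail special cases, by detaching the whole list up front and re-appending each node to one of the two lists. A userspace model of that shape (illustrative only; the kernel version also maintains byte counts):

#include <stdio.h>

struct node { struct node *next; int tag; };
struct list { struct node *head, *tail; };

static void list_put(struct list *l, struct node *n)
{
	n->next = NULL;
	if (l->tail)
		l->tail->next = n;
	else
		l->head = n;
	l->tail = n;
}

/* Same shape as qlist_move_cache() above: detach everything, then
 * re-append each node to either list. No prev pointers needed. */
static void move_matching(struct list *from, struct list *to, int tag)
{
	struct node *curr = from->head;

	from->head = from->tail = NULL;		/* qlist_init(from) */
	while (curr) {
		struct node *next = curr->next;

		list_put(curr->tag == tag ? to : from, curr);
		curr = next;
	}
}

int main(void)
{
	struct node n1 = { .tag = 1 }, n2 = { .tag = 2 }, n3 = { .tag = 1 };
	struct list from = { 0 }, to = { 0 };

	list_put(&from, &n1);
	list_put(&from, &n2);
	list_put(&from, &n3);
	move_matching(&from, &to, 1);
	for (struct node *n = to.head; n; n = n->next)
		printf("moved tag %d\n", n->tag);	/* tag 1, twice */
	return 0;
}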
len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
seq_printf(seq, " hex dump (first %zu bytes):\n", len);
+ kasan_disable_current();
seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+ kasan_enable_current();
}
/*
static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
- if (!current->memcg_may_oom || current->memcg_in_oom)
+ if (!current->memcg_may_oom)
return;
/*
* We are in the middle of the charge context here, so we
* ordering is imposed by list_lru_node->lock taken by
* memcg_drain_all_list_lrus().
*/
+ rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
css_for_each_descendant_pre(css, &memcg->css) {
child = mem_cgroup_from_css(css);
BUG_ON(child->kmemcg_id != kmemcg_id);
if (!memcg->use_hierarchy)
break;
}
+ rcu_read_unlock();
+
memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
memcg_free_cache_id(kmemcg_id);
{ }, /* terminate */
};
+/*
+ * Private memory cgroup IDR
+ *
+ * Swap-out records and page cache shadow entries need to store memcg
+ * references in constrained space, so we maintain an ID space that is
+ * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
+ * memory-controlled cgroups to 64k.
+ *
+ * However, there usually are many references to the offline CSS after
+ * the cgroup has been destroyed, such as page cache or reclaimable
+ * slab objects, that don't need to hang on to the ID. We want to keep
+ * those dead CSSes from occupying IDs, or we might quickly exhaust the
+ * relatively small ID space and prevent the creation of new cgroups
+ * even when there are far fewer than 64k cgroups - possibly none.
+ *
+ * Maintain a private 16-bit ID space for memcg, and allow the ID to
+ * be freed and recycled when it's no longer needed, which is usually
+ * when the CSS is offlined.
+ *
+ * The only exception to that are records of swapped out tmpfs/shmem
+ * pages that need to be attributed to live ancestors on swapin. But
+ * those references are manageable from userspace.
+ */
+
+static DEFINE_IDR(mem_cgroup_idr);
+
+static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+ atomic_inc(&memcg->id.ref);
+}
+
+static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+ if (atomic_dec_and_test(&memcg->id.ref)) {
+ idr_remove(&mem_cgroup_idr, memcg->id.id);
+ memcg->id.id = 0;
+
+ /* Memcg ID pins CSS */
+ css_put(&memcg->css);
+ }
+}
+
+/**
+ * mem_cgroup_from_id - look up a memcg from a memcg id
+ * @id: the memcg id to look up
+ *
+ * Caller must hold rcu_read_lock().
+ */
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ return idr_find(&mem_cgroup_idr, id);
+}
+
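A minimal usage sketch for the lookup above (the caller and the css_tryget_online() step are assumed, not part of this patch): the ID-to-memcg mapping is only stable under RCU, and the memcg found this way may be offline or concurrently freed, so a user has to take its own reference before leaving the RCU section.

	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);		/* id from e.g. a swap record */
	if (memcg && !css_tryget_online(&memcg->css))
		memcg = NULL;			/* cgroup already released */
	rcu_read_unlock();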
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
struct mem_cgroup_per_node *pn;
if (!memcg)
return NULL;
+ memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
+ 1, MEM_CGROUP_ID_MAX,
+ GFP_KERNEL);
+ if (memcg->id.id < 0)
+ goto fail;
+
memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
if (!memcg->stat)
goto fail;
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&memcg->cgwb_list);
#endif
+ idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
return memcg;
fail:
+ if (memcg->id.id > 0)
+ idr_remove(&mem_cgroup_idr, memcg->id.id);
mem_cgroup_free(memcg);
return NULL;
}
return &memcg->css;
fail:
mem_cgroup_free(memcg);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
-static int
-mem_cgroup_css_online(struct cgroup_subsys_state *css)
+static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
- if (css->id > MEM_CGROUP_ID_MAX)
- return -ENOSPC;
-
+ /* Online state pins memcg ID, memcg ID pins CSS */
+ mem_cgroup_id_get(mem_cgroup_from_css(css));
+ css_get(css);
return 0;
}
memcg_offline_kmem(memcg);
wb_memcg_offline(memcg);
+
+ mem_cgroup_id_put(memcg);
}
static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
struct mem_cgroup *memcg;
unsigned int nr_pages;
bool compound;
+ unsigned long flags;
VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
commit_charge(newpage, memcg, false);
- local_irq_disable();
+ local_irq_save(flags);
mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
memcg_check_events(memcg, newpage);
- local_irq_enable();
+ local_irq_restore(flags);
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
if (!memcg)
return;
+ mem_cgroup_id_get(memcg);
oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
VM_BUG_ON_PAGE(oldid, page);
mem_cgroup_swap_statistics(memcg, true);
VM_BUG_ON(!irqs_disabled());
mem_cgroup_charge_statistics(memcg, page, false, -1);
memcg_check_events(memcg, page);
+
+ if (!mem_cgroup_is_root(memcg))
+ css_put(&memcg->css);
}
/*
!page_counter_try_charge(&memcg->swap, 1, &counter))
return -ENOMEM;
+ mem_cgroup_id_get(memcg);
oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
VM_BUG_ON_PAGE(oldid, page);
mem_cgroup_swap_statistics(memcg, true);
- css_get(&memcg->css);
return 0;
}
page_counter_uncharge(&memcg->memsw, 1);
}
mem_cgroup_swap_statistics(memcg, false);
- css_put(&memcg->css);
+ mem_cgroup_id_put(memcg);
}
rcu_read_unlock();
}
* Protected against the rmap code by
* the page lock.
*/
- page_move_anon_rmap(compound_head(old_page),
- vma, address);
+ page_move_anon_rmap(old_page, vma);
}
unlock_page(old_page);
return wp_page_reuse(mm, vma, address, page_table, ptl,
* vm_ops->map_pages.
*/
void do_set_pte(struct vm_area_struct *vma, unsigned long address,
- struct page *page, pte_t *pte, bool write, bool anon, bool old)
+ struct page *page, pte_t *pte, bool write, bool anon)
{
pte_t entry;
entry = mk_pte(page, vma->vm_page_prot);
if (write)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- if (old)
- entry = pte_mkold(entry);
if (anon) {
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address, false);
update_mmu_cache(vma, address, pte);
}
-/*
- * If architecture emulates "accessed" or "young" bit without HW support,
- * there is no much gain with fault_around.
- */
static unsigned long fault_around_bytes __read_mostly =
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
- PAGE_SIZE;
-#else
rounddown_pow_of_two(65536);
-#endif
#ifdef CONFIG_DEBUG_FS
static int fault_around_bytes_get(void *data, u64 *val)
*/
if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
- if (!pte_same(*pte, orig_pte))
- goto unlock_out;
do_fault_around(vma, address, pte, pgoff, flags);
- /* Check if the fault is handled by faultaround */
- if (!pte_same(*pte, orig_pte)) {
- /*
- * Faultaround produce old pte, but the pte we've
- * handler fault for should be young.
- */
- pte_t entry = pte_mkyoung(*pte);
- if (ptep_set_access_flags(vma, address, pte, entry, 0))
- update_mmu_cache(vma, address, pte);
+ if (!pte_same(*pte, orig_pte))
goto unlock_out;
- }
pte_unmap_unlock(pte, ptl);
}
put_page(fault_page);
return ret;
}
- do_set_pte(vma, address, fault_page, pte, false, false, false);
+ do_set_pte(vma, address, fault_page, pte, false, false);
unlock_page(fault_page);
unlock_out:
pte_unmap_unlock(pte, ptl);
}
goto uncharge_out;
}
- do_set_pte(vma, address, new_page, pte, true, true, false);
+ do_set_pte(vma, address, new_page, pte, true, true);
mem_cgroup_commit_charge(new_page, memcg, false, false);
lru_cache_add_active_or_unevictable(new_page, vma);
pte_unmap_unlock(pte, ptl);
put_page(fault_page);
return ret;
}
- do_set_pte(vma, address, fault_page, pte, true, false, false);
+ do_set_pte(vma, address, fault_page, pte, true, false);
pte_unmap_unlock(pte, ptl);
if (set_page_dirty(fault_page))
static void kasan_poison_element(mempool_t *pool, void *element)
{
- if (pool->alloc == mempool_alloc_slab)
- kasan_poison_slab_free(pool->pool_data, element);
- if (pool->alloc == mempool_kmalloc)
- kasan_kfree(element);
+ if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+ kasan_poison_kfree(element);
if (pool->alloc == mempool_alloc_pages)
kasan_free_pages(element, (unsigned long)pool->pool_data);
}
static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
{
- if (pool->alloc == mempool_alloc_slab)
- kasan_slab_alloc(pool->pool_data, element, flags);
- if (pool->alloc == mempool_kmalloc)
- kasan_krealloc(element, (size_t)pool->pool_data, flags);
+ if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+ kasan_unpoison_slab(element);
if (pool->alloc == mempool_alloc_pages)
kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}
return MIGRATEPAGE_SUCCESS;
}
+EXPORT_SYMBOL(migrate_page_move_mapping);
/*
* The expected number of remaining references is the same as that
mem_cgroup_migrate(page, newpage);
}
+EXPORT_SYMBOL(migrate_page_copy);
/************************************************************
* Migration functions
p = find_lock_task_mm(tsk);
if (!p)
goto unlock_oom;
-
mm = p->mm;
- if (!atomic_inc_not_zero(&mm->mm_users)) {
- task_unlock(p);
- goto unlock_oom;
- }
-
+ atomic_inc(&mm->mm_users);
task_unlock(p);
if (!down_read_trylock(&mm->mmap_sem)) {
if (atomic_read(&mm->mm_users) > 1) {
rcu_read_lock();
for_each_process(p) {
- bool exiting;
-
if (!process_shares_mm(p, mm))
continue;
if (fatal_signal_pending(p))
 * If the task is exiting, make sure the whole thread group
 * is exiting and cannot access mm anymore.
*/
- spin_lock_irq(&p->sighand->siglock);
- exiting = signal_group_exit(p->signal);
- spin_unlock_irq(&p->sighand->siglock);
- if (exiting)
+ if (signal_group_exit(p->signal))
continue;
/* Give up */
struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
unsigned long bytes = vm_dirty_bytes;
unsigned long bg_bytes = dirty_background_bytes;
- unsigned long ratio = vm_dirty_ratio;
- unsigned long bg_ratio = dirty_background_ratio;
+ /* convert ratios to per-PAGE_SIZE for higher precision */
+ unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
+ unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
unsigned long thresh;
unsigned long bg_thresh;
struct task_struct *tsk;
/*
* The byte settings can't be applied directly to memcg
* domains. Convert them to ratios by scaling against
- * globally available memory.
+ * globally available memory. As the ratios are in
+ * per-PAGE_SIZE units, they can be obtained by dividing
+ * the byte value by the number of available pages.
*/
if (bytes)
- ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
- global_avail, 100UL);
+ ratio = min(DIV_ROUND_UP(bytes, global_avail),
+ PAGE_SIZE);
if (bg_bytes)
- bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
- global_avail, 100UL);
+ bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
+ PAGE_SIZE);
bytes = bg_bytes = 0;
}
if (bytes)
thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
else
- thresh = (ratio * available_memory) / 100;
+ thresh = (ratio * available_memory) / PAGE_SIZE;
if (bg_bytes)
bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
else
- bg_thresh = (bg_ratio * available_memory) / 100;
+ bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
if (bg_thresh >= thresh)
bg_thresh = thresh / 2;
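A standalone worked example of the precision gain (the numbers are purely illustrative): with vm_dirty_bytes = 100 MiB against a 16 GiB domain, the old percent-based conversion truncates to 0%, while the per-PAGE_SIZE conversion preserves the setting.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long bytes = 100UL << 20;	/* vm_dirty_bytes = 100 MiB */
	unsigned long global_avail = 4194304;	/* 16 GiB in 4 KiB pages */
	unsigned long available = global_avail;

	/* old: ratio in whole percent, truncates to 0 */
	unsigned long old_ratio =
		DIV_ROUND_UP(bytes, page_size) * 100 / global_avail;
	/* new: ratio in per-PAGE_SIZE units, comes out as 25/4096 */
	unsigned long new_ratio = DIV_ROUND_UP(bytes, global_avail);

	printf("old thresh: %lu pages\n", old_ratio * available / 100);
	printf("new thresh: %lu pages\n",
	       new_ratio * available / page_size);	/* 25600 pages = 100 MiB */
	return 0;
}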
/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
- if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
+ int nid = early_pfn_to_nid(pfn);
+
+ if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
return true;
return false;
return;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+
__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
INIT_LIST_HEAD(&page->lru);
return;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+
__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
set_page_private(page, 0);
spin_lock(&early_pfn_lock);
nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
if (nid < 0)
- nid = 0;
+ nid = first_online_node;
spin_unlock(&early_pfn_lock);
return nid;
page = list_last_entry(list, struct page, lru);
else
page = list_first_entry(list, struct page, lru);
- } while (page && check_new_pcp(page));
- __dec_zone_state(zone, NR_ALLOC_BATCH);
- list_del(&page->lru);
- pcp->count--;
+ __dec_zone_state(zone, NR_ALLOC_BATCH);
+ list_del(&page->lru);
+ pcp->count--;
+
+ } while (check_new_pcp(page));
} else {
/*
* We most definitely don't want callers attempting to
apply_fair = false;
fair_skipped = false;
reset_alloc_batches(ac->preferred_zoneref->zone);
+ z = ac->preferred_zoneref;
goto zonelist_scan;
}
*/
alloc_flags = gfp_to_alloc_flags(gfp_mask);
+ /*
+ * Reset the zonelist iterators if memory policies can be ignored.
+ * These allocations are high priority and system rather than user
+ * orientated.
+ */
+ if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
+ ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+ ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+ ac->high_zoneidx, ac->nodemask);
+ }
+
/* This is the last chance, in general, before the goto nopage. */
page = get_page_from_freelist(gfp_mask, order,
alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
/* Allocate without watermarks if the context allows */
if (alloc_flags & ALLOC_NO_WATERMARKS) {
- /*
- * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
- * the allocation is high priority and these type of
- * allocations are system rather than user orientated
- */
- ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
page = get_page_from_freelist(gfp_mask, order,
ALLOC_NO_WATERMARKS, ac);
if (page)
/* Dirty zone balancing only done in the fast path */
ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
- /* The preferred zone is used for statistics later */
+ /*
+ * The preferred zone is used for statistics but crucially it is
+ * also used as the starting point for the zonelist iterator. It
+ * may get reset for allocations that ignore memory policies.
+ */
ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
ac.high_zoneidx, ac.nodemask);
if (!ac.preferred_zoneref) {
for (i = 0; i < (1 << order); i++) {
page_ext = lookup_page_ext(page + i);
+ if (unlikely(!page_ext))
+ continue;
__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
}
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
struct page_ext *page_ext = lookup_page_ext(page);
+
struct stack_trace trace = {
.nr_entries = 0,
.max_entries = ARRAY_SIZE(page_ext->trace_entries),
.skip = 3,
};
+ if (unlikely(!page_ext))
+ return;
+
save_stack_trace(&trace);
page_ext->order = order;
void __set_page_owner_migrate_reason(struct page *page, int reason)
{
struct page_ext *page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
page_ext->last_migrate_reason = reason;
}
gfp_t __get_page_owner_gfp(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ /*
+ * The caller just returns 0 if no valid gfp
+ * So return 0 here too.
+ */
+ return 0;
return page_ext->gfp_mask;
}
struct page_ext *new_ext = lookup_page_ext(newpage);
int i;
+ if (unlikely(!old_ext || !new_ext))
+ return;
+
new_ext->order = old_ext->order;
new_ext->gfp_mask = old_ext->gfp_mask;
new_ext->nr_entries = old_ext->nr_entries;
.nr_entries = page_ext->nr_entries,
.entries = &page_ext->trace_entries[0],
};
- gfp_t gfp_mask = page_ext->gfp_mask;
- int mt = gfpflags_to_migratetype(gfp_mask);
+ gfp_t gfp_mask;
+ int mt;
+
+ if (unlikely(!page_ext)) {
+ pr_alert("There is not page extension available.\n");
+ return;
+ }
+ gfp_mask = page_ext->gfp_mask;
+ mt = gfpflags_to_migratetype(gfp_mask);
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
pr_alert("page_owner info is not active (free page?)\n");
}
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ continue;
/*
* Some pages could be missed by concurrent allocation or free,
continue;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ continue;
/* Maybe overlapping zone */
if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
struct page_ext *page_ext;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+
__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}
struct page_ext *page_ext;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+
__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}
struct page_ext *page_ext;
page_ext = lookup_page_ext(page);
- if (!page_ext)
+ if (unlikely(!page_ext))
return false;
return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
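All of the hunks above add the same guard: lookup_page_ext() can return NULL when the page_ext array does not exist for a page (allocation failure, or a not-yet-initialized section), so every user must bail out first. A minimal sketch of the pattern; the function name and SOME_PAGE_EXT_FLAG are hypothetical:

static void use_page_ext(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	/* the extension array may simply not exist for this page */
	if (unlikely(!page_ext))
		return;

	__set_bit(SOME_PAGE_EXT_FLAG, &page_ext->flags);
}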
int map_used; /* # of map entries used before the sentry */
int map_alloc; /* # of map entries allocated */
int *map; /* allocation map */
- struct work_struct map_extend_work;/* async ->map[] extension */
+ struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
void *data; /* chunk data */
int first_free; /* no free below this */
static int pcpu_reserved_chunk_limit;
static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
-static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
+static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+/* chunks which need their map areas extended, protected by pcpu_lock */
+static LIST_HEAD(pcpu_map_extend_chunks);
+
/*
* The number of empty populated pages, protected by pcpu_lock. The
* reserved chunk doesn't contribute to the count.
{
int margin, new_alloc;
+ lockdep_assert_held(&pcpu_lock);
+
if (is_atomic) {
margin = 3;
if (chunk->map_alloc <
- chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
- pcpu_async_enabled)
- schedule_work(&chunk->map_extend_work);
+ chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
+ if (list_empty(&chunk->map_extend_list)) {
+ list_add_tail(&chunk->map_extend_list,
+ &pcpu_map_extend_chunks);
+ pcpu_schedule_balance_work();
+ }
+ }
} else {
margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
}
size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
unsigned long flags;
+ lockdep_assert_held(&pcpu_alloc_mutex);
+
new = pcpu_mem_zalloc(new_size);
if (!new)
return -ENOMEM;
return 0;
}
-static void pcpu_map_extend_workfn(struct work_struct *work)
-{
- struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
- map_extend_work);
- int new_alloc;
-
- spin_lock_irq(&pcpu_lock);
- new_alloc = pcpu_need_to_extend(chunk, false);
- spin_unlock_irq(&pcpu_lock);
-
- if (new_alloc)
- pcpu_extend_area_map(chunk, new_alloc);
-}
-
/**
* pcpu_fit_in_area - try to fit the requested allocation in a candidate area
* @chunk: chunk the candidate area belongs to
chunk->map_used = 1;
INIT_LIST_HEAD(&chunk->list);
- INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
+ INIT_LIST_HEAD(&chunk->map_extend_list);
chunk->free_size = pcpu_unit_size;
chunk->contig_hint = pcpu_unit_size;
return NULL;
}
+ if (!is_atomic)
+ mutex_lock(&pcpu_alloc_mutex);
+
spin_lock_irqsave(&pcpu_lock, flags);
/* serve reserved allocations from the reserved chunk if available */
if (is_atomic)
goto fail;
- mutex_lock(&pcpu_alloc_mutex);
-
if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
chunk = pcpu_create_chunk();
if (!chunk) {
- mutex_unlock(&pcpu_alloc_mutex);
err = "failed to allocate new chunk";
goto fail;
}
spin_lock_irqsave(&pcpu_lock, flags);
}
- mutex_unlock(&pcpu_alloc_mutex);
goto restart;
area_found:
if (!is_atomic) {
int page_start, page_end, rs, re;
- mutex_lock(&pcpu_alloc_mutex);
-
page_start = PFN_DOWN(off);
page_end = PFN_UP(off + size);
spin_lock_irqsave(&pcpu_lock, flags);
if (ret) {
- mutex_unlock(&pcpu_alloc_mutex);
pcpu_free_area(chunk, off, &occ_pages);
err = "failed to populate";
goto fail_unlock;
/* see the flag handling in pcpu_balance_workfn() */
pcpu_atomic_alloc_failed = true;
pcpu_schedule_balance_work();
+ } else {
+ mutex_unlock(&pcpu_alloc_mutex);
}
return NULL;
}
if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
continue;
+ list_del_init(&chunk->map_extend_list);
list_move(&chunk->list, &to_free);
}
pcpu_destroy_chunk(chunk);
}
+ /* service chunks which requested async area map extension */
+ do {
+ int new_alloc = 0;
+
+ spin_lock_irq(&pcpu_lock);
+
+ chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
+ struct pcpu_chunk, map_extend_list);
+ if (chunk) {
+ list_del_init(&chunk->map_extend_list);
+ new_alloc = pcpu_need_to_extend(chunk, false);
+ }
+
+ spin_unlock_irq(&pcpu_lock);
+
+ if (new_alloc)
+ pcpu_extend_area_map(chunk, new_alloc);
+ } while (chunk);
+
/*
* Ensure there are certain number of free populated pages for
* atomic allocs. Fill up from the most packed so that atomic
*/
schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
INIT_LIST_HEAD(&schunk->list);
- INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
+ INIT_LIST_HEAD(&schunk->map_extend_list);
schunk->base_addr = base_addr;
schunk->map = smap;
schunk->map_alloc = ARRAY_SIZE(smap);
if (dyn_size) {
dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
INIT_LIST_HEAD(&dchunk->list);
- INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
+ INIT_LIST_HEAD(&dchunk->map_extend_list);
dchunk->base_addr = base_addr;
dchunk->map = dmap;
dchunk->map_alloc = ARRAY_SIZE(dmap);
* page_move_anon_rmap - move a page to our anon_vma
* @page: the page to move to our anon_vma
* @vma: the vma the page belongs to
- * @address: the user virtual address mapped
*
* When a page belongs exclusively to one process after a COW event,
* that page can be moved into the anon_vma that belongs to just that
* process, so the rmap code will not search the parent or sibling
* processes.
*/
-void page_move_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
+ page = compound_head(page);
+
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_VMA(!anon_vma, vma);
- if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
- address &= HPAGE_PMD_MASK;
- VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
/*
goto out;
}
- pte = page_check_address(page, mm, address, &ptl, 0);
+ pte = page_check_address(page, mm, address, &ptl,
+ PageTransCompound(page));
if (!pte)
goto out;
error = shmem_getpage(inode, index, &page, SGP_FALLOC);
if (error) {
/* Remove the !PageUptodate pages we added */
- shmem_undo_range(inode,
- (loff_t)start << PAGE_SHIFT,
- (loff_t)index << PAGE_SHIFT, true);
+ if (index > start) {
+ shmem_undo_range(inode,
+ (loff_t)start << PAGE_SHIFT,
+ ((loff_t)index << PAGE_SHIFT) - 1, true);
+ }
goto undone;
}
goto out_unlock;
cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
- cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
- css->id, memcg_name_buf);
+ cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
+ css->serial_nr, memcg_name_buf);
if (!cache_name)
goto out_unlock;
get_page(page);
local_irq_save(flags);
pvec = this_cpu_ptr(&lru_rotate_pvecs);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_move_tail(pvec);
local_irq_restore(flags);
}
struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
get_page(page);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_lru_move_fn(pvec, __activate_page, NULL);
put_cpu_var(activate_page_pvecs);
}
struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
get_page(page);
- if (!pagevec_space(pvec))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
__pagevec_lru_add(pvec);
- pagevec_add(pvec, page);
put_cpu_var(lru_add_pvec);
}
if (likely(get_page_unless_zero(page))) {
struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
put_cpu_var(lru_deactivate_file_pvecs);
}
struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
get_page(page);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
put_cpu_var(lru_deactivate_pvecs);
}
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+/*
+ * lru_add_drain_wq is used to run lru_add_drain_all() from a
+ * WQ_MEM_RECLAIM workqueue, so that draining can make progress and
+ * help free memory even under memory pressure.
+ */
+static struct workqueue_struct *lru_add_drain_wq;
+
+static int __init lru_init(void)
+{
+ lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
+
+ if (WARN(!lru_add_drain_wq,
+ "Failed to create workqueue lru_add_drain_wq"))
+ return -ENOMEM;
+
+ return 0;
+}
+early_initcall(lru_init);
+
void lru_add_drain_all(void)
{
static DEFINE_MUTEX(lock);
pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
need_activate_page_drain(cpu)) {
INIT_WORK(work, lru_add_drain_per_cpu);
- schedule_work_on(cpu, work);
+ queue_work_on(cpu, lru_add_drain_wq, work);
cpumask_set_cpu(cpu, &has_work);
}
}
void free_page_and_swap_cache(struct page *page)
{
free_swap_cache(page);
- put_page(page);
+ if (is_huge_zero_page(page))
+ put_huge_zero_page();
+ else
+ put_page(page);
}
/*
*/
void vm_unmap_ram(const void *mem, unsigned int count)
{
- unsigned long size = count << PAGE_SHIFT;
+ unsigned long size = (unsigned long)count << PAGE_SHIFT;
unsigned long addr = (unsigned long)mem;
BUG_ON(!addr);
*/
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
- unsigned long size = count << PAGE_SHIFT;
+ unsigned long size = (unsigned long)count << PAGE_SHIFT;
unsigned long addr;
void *mem;
unsigned long flags, pgprot_t prot)
{
struct vm_struct *area;
+ unsigned long size; /* In bytes */
might_sleep();
if (count > totalram_pages)
return NULL;
- area = get_vm_area_caller((count << PAGE_SHIFT), flags,
- __builtin_return_address(0));
+ size = (unsigned long)count << PAGE_SHIFT;
+ area = get_vm_area_caller(size, flags, __builtin_return_address(0));
if (!area)
return NULL;
continue;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ continue;
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
continue;
max_order = fls_long(totalram_pages - 1);
if (max_order > timestamp_bits)
bucket_order = max_order - timestamp_bits;
- printk("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+ pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
timestamp_bits, max_order, bucket_order);
ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
/* HEADLESS page stored */
bud = HEADLESS;
} else {
- bud = (handle - zhdr->first_num) & BUDDY_MASK;
+ bud = handle_to_buddy(handle);
switch (bud) {
case FIRST:
pool->pages_nr--;
spin_unlock(&pool->lock);
return 0;
- } else if (zhdr->first_chunks != 0 &&
- zhdr->last_chunks != 0 && zhdr->middle_chunks != 0) {
- /* Full, add to buddied list */
- list_add(&zhdr->buddy, &pool->buddied);
- } else if (!test_bit(PAGE_HEADLESS, &page->private)) {
- z3fold_compact_page(zhdr);
- /* add to unbuddied list */
- freechunks = num_free_chunks(zhdr);
- list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+ } else if (!test_bit(PAGE_HEADLESS, &page->private)) {
+ if (zhdr->first_chunks != 0 &&
+ zhdr->last_chunks != 0 &&
+ zhdr->middle_chunks != 0) {
+ /* Full, add to buddied list */
+ list_add(&zhdr->buddy, &pool->buddied);
+ } else {
+ z3fold_compact_page(zhdr);
+ /* add to unbuddied list */
+ freechunks = num_free_chunks(zhdr);
+ list_add(&zhdr->buddy,
+ &pool->unbuddied[freechunks]);
+ }
}
/* add to beginning of LRU */
if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
return;
+ /* vlan continues to inherit address of lower device */
+ if (vlan_dev_inherit_address(vlandev, dev))
+ goto out;
+
/* vlan address was different from the old address and is equal to
* the new address */
if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
!ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
dev_uc_add(dev, vlandev->dev_addr);
+out:
ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+bool vlan_dev_inherit_address(struct net_device *dev,
+ struct net_device *real_dev);
static inline u32 vlan_get_ingress_priority(struct net_device *dev,
u16 vlan_tci)
static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
- /* TODO: gotta make sure the underlying layer can handle it,
- * maybe an IFF_VLAN_CAPABLE flag for devices?
- */
- if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ unsigned int max_mtu = real_dev->mtu;
+
+ if (netif_reduces_vlan_mtu(real_dev))
+ max_mtu -= VLAN_HLEN;
+ if (max_mtu < new_mtu)
return -ERANGE;
dev->mtu = new_mtu;
strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
}
+bool vlan_dev_inherit_address(struct net_device *dev,
+ struct net_device *real_dev)
+{
+ if (dev->addr_assign_type != NET_ADDR_STOLEN)
+ return false;
+
+ ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return true;
+}
+
static int vlan_dev_open(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
return -ENETDOWN;
- if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+ !vlan_dev_inherit_address(dev, real_dev)) {
err = dev_uc_add(real_dev, dev->dev_addr);
if (err < 0)
goto out;
/* ipv6 shared card related stuff */
dev->dev_id = real_dev->dev_id;
- if (is_zero_ether_addr(dev->dev_addr))
- eth_hw_addr_inherit(dev, real_dev);
+ if (is_zero_ether_addr(dev->dev_addr)) {
+ ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+ dev->addr_assign_type = NET_ADDR_STOLEN;
+ }
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev;
+ unsigned int max_mtu;
__be16 proto;
int err;
if (err < 0)
return err;
+ max_mtu = netif_reduces_vlan_mtu(real_dev) ? real_dev->mtu - VLAN_HLEN :
+ real_dev->mtu;
if (!tb[IFLA_MTU])
- dev->mtu = real_dev->mtu;
- else if (dev->mtu > real_dev->mtu)
+ dev->mtu = max_mtu;
+ else if (dev->mtu > max_mtu)
return -EINVAL;
err = vlan_changelink(dev, tb, data);
break;
case as_addparty:
case as_dropparty:
- sk->sk_err_soft = msg->reply;
+ sk->sk_err_soft = -msg->reply;
/* < 0 failure, otherwise ep_ref */
clear_bit(ATM_VF_WAITING, &vcc->flags);
break;
schedule();
}
finish_wait(sk_sleep(sk), &wait);
- error = xchg(&sk->sk_err_soft, 0);
+ error = -xchg(&sk->sk_err_soft, 0);
out:
release_sock(sk);
return error;
error = -EUNATCH;
goto out;
}
- error = xchg(&sk->sk_err_soft, 0);
+ error = -xchg(&sk->sk_err_soft, 0);
out:
release_sock(sk);
return error;
release_sock(sk);
ax25_disconnect(ax25, 0);
lock_sock(sk);
- ax25_destroy_socket(ax25);
+ if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_destroy_socket(ax25);
break;
case AX25_STATE_3:
switch (ax25->state) {
case AX25_STATE_0:
+ case AX25_STATE_2:
/* Magic here: If we listen() and a new link dies before it
   is accepted(), it isn't 'dead', so it doesn't get removed. */
if (!sk || sock_flag(sk, SOCK_DESTROY) ||
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
+ /* Ungrab socket and destroy it */
sock_put(sk);
} else
ax25_destroy_socket(ax25);
case AX25_STATE_2:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
- ax25_disconnect(ax25, ETIMEDOUT);
+ if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
switch (ax25->state) {
case AX25_STATE_0:
+ case AX25_STATE_2:
/* Magic here: If we listen() and a new link dies before it
   is accepted(), it isn't 'dead', so it doesn't get removed. */
if (!sk || sock_flag(sk, SOCK_DESTROY) ||
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
+ /* Ungrab socket and destroy it */
sock_put(sk);
} else
ax25_destroy_socket(ax25);
case AX25_STATE_2:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
- ax25_disconnect(ax25, ETIMEDOUT);
+ if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
{
ax25_clear_queues(ax25);
- ax25_stop_heartbeat(ax25);
+ if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
static void batadv_claim_release(struct kref *ref)
{
struct batadv_bla_claim *claim;
+ struct batadv_bla_backbone_gw *old_backbone_gw;
claim = container_of(ref, struct batadv_bla_claim, refcount);
- batadv_backbone_gw_put(claim->backbone_gw);
+ spin_lock_bh(&claim->backbone_lock);
+ old_backbone_gw = claim->backbone_gw;
+ claim->backbone_gw = NULL;
+ spin_unlock_bh(&claim->backbone_lock);
+
+ spin_lock_bh(&old_backbone_gw->crc_lock);
+ old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+ spin_unlock_bh(&old_backbone_gw->crc_lock);
+
+ batadv_backbone_gw_put(old_backbone_gw);
+
kfree_rcu(claim, rcu);
}
break;
}
- if (vid & BATADV_VLAN_HAS_TAG)
+ if (vid & BATADV_VLAN_HAS_TAG) {
skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
+ if (!skb)
+ goto out;
+ }
skb_reset_mac_header(skb);
skb->protocol = eth_type_trans(skb, soft_iface);
const u8 *mac, const unsigned short vid,
struct batadv_bla_backbone_gw *backbone_gw)
{
+ struct batadv_bla_backbone_gw *old_backbone_gw;
struct batadv_bla_claim *claim;
struct batadv_bla_claim search_claim;
+ bool remove_crc = false;
int hash_added;
ether_addr_copy(search_claim.addr, mac);
return;
ether_addr_copy(claim->addr, mac);
+ spin_lock_init(&claim->backbone_lock);
claim->vid = vid;
claim->lasttime = jiffies;
+ kref_get(&backbone_gw->refcount);
claim->backbone_gw = backbone_gw;
kref_init(&claim->refcount);
"bla_add_claim(): changing ownership for %pM, vid %d\n",
mac, BATADV_PRINT_VID(vid));
- spin_lock_bh(&claim->backbone_gw->crc_lock);
- claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
- spin_unlock_bh(&claim->backbone_gw->crc_lock);
- batadv_backbone_gw_put(claim->backbone_gw);
+ remove_crc = true;
}
- /* set (new) backbone gw */
+
+ /* replace backbone_gw atomically and adjust reference counters */
+ spin_lock_bh(&claim->backbone_lock);
+ old_backbone_gw = claim->backbone_gw;
kref_get(&backbone_gw->refcount);
claim->backbone_gw = backbone_gw;
+ spin_unlock_bh(&claim->backbone_lock);
+ if (remove_crc) {
+ /* remove claim address from old backbone_gw */
+ spin_lock_bh(&old_backbone_gw->crc_lock);
+ old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+ spin_unlock_bh(&old_backbone_gw->crc_lock);
+ }
+
+ batadv_backbone_gw_put(old_backbone_gw);
+
+ /* add claim address to new backbone_gw */
spin_lock_bh(&backbone_gw->crc_lock);
backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
spin_unlock_bh(&backbone_gw->crc_lock);
batadv_claim_put(claim);
}
+/**
+ * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
+ * claim
+ * @claim: claim whose backbone_gw should be returned
+ *
+ * Return: valid reference to claim::backbone_gw
+ */
+static struct batadv_bla_backbone_gw *
+batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+
+ spin_lock_bh(&claim->backbone_lock);
+ backbone_gw = claim->backbone_gw;
+ kref_get(&backbone_gw->refcount);
+ spin_unlock_bh(&claim->backbone_lock);
+
+ return backbone_gw;
+}
+
/**
* batadv_bla_del_claim - delete a claim from the claim hash
* @bat_priv: the bat priv with all the soft interface information
batadv_choose_claim, claim);
batadv_claim_put(claim); /* reference from the hash is gone */
- spin_lock_bh(&claim->backbone_gw->crc_lock);
- claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
- spin_unlock_bh(&claim->backbone_gw->crc_lock);
-
/* don't need the reference from hash_find() anymore */
batadv_claim_put(claim);
}
struct batadv_hard_iface *primary_if,
int now)
{
+ struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_bla_claim *claim;
struct hlist_head *head;
struct batadv_hashtable *hash;
rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
if (now)
goto purge_now;
- if (!batadv_compare_eth(claim->backbone_gw->orig,
+
+ if (!batadv_compare_eth(backbone_gw->orig,
primary_if->net_dev->dev_addr))
- continue;
+ goto skip;
+
if (!batadv_has_timed_out(claim->lasttime,
BATADV_BLA_CLAIM_TIMEOUT))
- continue;
+ goto skip;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"bla_purge_claims(): %pM, vid %d, time out\n",
purge_now:
batadv_handle_unclaim(bat_priv, primary_if,
- claim->backbone_gw->orig,
+ backbone_gw->orig,
claim->addr, claim->vid);
+skip:
+ batadv_backbone_gw_put(backbone_gw);
}
rcu_read_unlock();
}
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid, bool is_bcast)
{
+ struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
struct batadv_bla_claim search_claim, *claim = NULL;
struct batadv_hard_iface *primary_if;
+ bool own_claim;
bool ret;
ethhdr = eth_hdr(skb);
}
/* if it is our own claim ... */
- if (batadv_compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr)) {
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+ own_claim = batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr);
+ batadv_backbone_gw_put(backbone_gw);
+
+ if (own_claim) {
/* ... allow it in any case */
claim->lasttime = jiffies;
goto allow;
{
struct ethhdr *ethhdr;
struct batadv_bla_claim search_claim, *claim = NULL;
+ struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_hard_iface *primary_if;
+ bool client_roamed;
bool ret = false;
primary_if = batadv_primary_if_get_selected(bat_priv);
goto allow;
/* check if we are responsible. */
- if (batadv_compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr)) {
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+ client_roamed = batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr);
+ batadv_backbone_gw_put(backbone_gw);
+
+ if (client_roamed) {
/* if yes, the client has roamed and we have
* to unclaim it.
*/
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
+ struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_bla_claim *claim;
struct batadv_hard_iface *primary_if;
struct hlist_head *head;
rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
- is_own = batadv_compare_eth(claim->backbone_gw->orig,
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+
+ is_own = batadv_compare_eth(backbone_gw->orig,
primary_addr);
- spin_lock_bh(&claim->backbone_gw->crc_lock);
- backbone_crc = claim->backbone_gw->crc;
- spin_unlock_bh(&claim->backbone_gw->crc_lock);
+ spin_lock_bh(&backbone_gw->crc_lock);
+ backbone_crc = backbone_gw->crc;
+ spin_unlock_bh(&backbone_gw->crc_lock);
seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
claim->addr, BATADV_PRINT_VID(claim->vid),
- claim->backbone_gw->orig,
+ backbone_gw->orig,
(is_own ? 'x' : ' '),
backbone_crc);
+
+ batadv_backbone_gw_put(backbone_gw);
}
rcu_read_unlock();
}
if (!skb_new)
goto out;
- if (vid & BATADV_VLAN_HAS_TAG)
+ if (vid & BATADV_VLAN_HAS_TAG) {
skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
+ if (!skb_new)
+ goto out;
+ }
skb_reset_mac_header(skb_new);
skb_new->protocol = eth_type_trans(skb_new,
*/
skb_reset_mac_header(skb_new);
- if (vid & BATADV_VLAN_HAS_TAG)
+ if (vid & BATADV_VLAN_HAS_TAG) {
skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
+ if (!skb_new)
+ goto out;
+ }
/* To preserve backwards compatibility, the node has to choose the outgoing
* format based on the incoming request packet type. The assumption is
struct batadv_neigh_node *neigh_node;
struct batadv_orig_node *orig_node;
struct batadv_orig_ifinfo *orig_ifinfo;
+ struct batadv_orig_node_vlan *vlan;
+ struct batadv_orig_ifinfo *last_candidate;
orig_node = container_of(ref, struct batadv_orig_node, refcount);
hlist_del_rcu(&orig_ifinfo->list);
batadv_orig_ifinfo_put(orig_ifinfo);
}
+
+ last_candidate = orig_node->last_bonding_candidate;
+ orig_node->last_bonding_candidate = NULL;
spin_unlock_bh(&orig_node->neigh_list_lock);
+ if (last_candidate)
+ batadv_orig_ifinfo_put(last_candidate);
+
+ spin_lock_bh(&orig_node->vlan_list_lock);
+ hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
+ hlist_del_rcu(&vlan->list);
+ batadv_orig_node_vlan_put(vlan);
+ }
+ spin_unlock_bh(&orig_node->vlan_list_lock);
+
/* Free nc_nodes */
batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
+ ethhdr = eth_hdr(skb);
icmph = (struct batadv_icmp_header *)skb->data;
icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
return 0;
}
+/**
+ * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
+ * @orig_node: originator node whose bonding candidates should be replaced
+ * @new_candidate: new bonding candidate or NULL
+ */
+static void
+batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
+ struct batadv_orig_ifinfo *new_candidate)
+{
+ struct batadv_orig_ifinfo *old_candidate;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+ old_candidate = orig_node->last_bonding_candidate;
+
+ if (new_candidate)
+ kref_get(&new_candidate->refcount);
+ orig_node->last_bonding_candidate = new_candidate;
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
+ if (old_candidate)
+ batadv_orig_ifinfo_put(old_candidate);
+}
+
/**
* batadv_find_router - find a suitable router for this originator
* @bat_priv: the bat priv with all the soft interface information
}
rcu_read_unlock();
- /* last_bonding_candidate is reset below, remove the old reference. */
- if (orig_node->last_bonding_candidate)
- batadv_orig_ifinfo_put(orig_node->last_bonding_candidate);
-
/* After finding candidates, handle the three cases:
* 1) there is a next candidate, use that
* 2) there is no next candidate, use the first of the list
if (next_candidate) {
batadv_neigh_node_put(router);
- /* remove references to first candidate, we don't need it. */
- if (first_candidate) {
- batadv_neigh_node_put(first_candidate_router);
- batadv_orig_ifinfo_put(first_candidate);
- }
+ kref_get(&next_candidate_router->refcount);
router = next_candidate_router;
- orig_node->last_bonding_candidate = next_candidate;
+ batadv_last_bonding_replace(orig_node, next_candidate);
} else if (first_candidate) {
batadv_neigh_node_put(router);
- /* refcounting has already been done in the loop above. */
+ kref_get(&first_candidate_router->refcount);
router = first_candidate_router;
- orig_node->last_bonding_candidate = first_candidate;
+ batadv_last_bonding_replace(orig_node, first_candidate);
} else {
- orig_node->last_bonding_candidate = NULL;
+ batadv_last_bonding_replace(orig_node, NULL);
+ }
+
+ /* cleanup of candidates */
+ if (first_candidate) {
+ batadv_neigh_node_put(first_candidate_router);
+ batadv_orig_ifinfo_put(first_candidate);
+ }
+
+ if (next_candidate) {
+ batadv_neigh_node_put(next_candidate_router);
+ batadv_orig_ifinfo_put(next_candidate);
}
return router;
struct batadv_orig_node *orig_node;
orig_node = batadv_gw_get_selected_orig(bat_priv);
- return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
- orig_node, vid);
+ return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
+ BATADV_P_DATA, orig_node, vid);
}
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
struct list_head *head)
{
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct batadv_hard_iface *hard_iface;
+ struct batadv_softif_vlan *vlan;
list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface == soft_iface)
BATADV_IF_CLEANUP_KEEP);
}
+ /* destroy the "untagged" VLAN */
+ vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+ if (vlan) {
+ batadv_softif_destroy_vlan(bat_priv, vlan);
+ batadv_softif_vlan_put(vlan);
+ }
+
batadv_sysfs_del_meshif(soft_iface);
unregister_netdevice_queue(soft_iface, head);
}
/* increase the refcounter of the related vlan */
vlan = batadv_softif_vlan_get(bat_priv, vid);
- if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
- addr, BATADV_PRINT_VID(vid))) {
+ if (!vlan) {
+ net_ratelimited_function(batadv_info, soft_iface,
+ "adding TT local entry %pM to non-existent VLAN %d\n",
+ addr, BATADV_PRINT_VID(vid));
kfree(tt_local);
tt_local = NULL;
goto out;
if (unlikely(hash_added != 0)) {
/* remove the reference for the hash */
batadv_tt_local_entry_put(tt_local);
- batadv_softif_vlan_put(vlan);
goto out;
}
return crc;
}
+/**
+ * batadv_tt_req_node_release - free tt_req node entry
+ * @ref: kref pointer of the tt req_node entry
+ */
+static void batadv_tt_req_node_release(struct kref *ref)
+{
+ struct batadv_tt_req_node *tt_req_node;
+
+ tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount);
+
+ kfree(tt_req_node);
+}
+
+/**
+ * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
+ * possibly release it
+ * @tt_req_node: tt_req_node to be freed
+ */
+static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node)
+{
+ kref_put(&tt_req_node->refcount, batadv_tt_req_node_release);
+}
+
static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
{
struct batadv_tt_req_node *node;
hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
hlist_del_init(&node->list);
- kfree(node);
+ batadv_tt_req_node_put(node);
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
if (batadv_has_timed_out(node->issued_at,
BATADV_TT_REQUEST_TIMEOUT)) {
hlist_del_init(&node->list);
- kfree(node);
+ batadv_tt_req_node_put(node);
}
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
if (!tt_req_node)
goto unlock;
+ kref_init(&tt_req_node->refcount);
ether_addr_copy(tt_req_node->addr, orig_node->orig);
tt_req_node->issued_at = jiffies;
+ kref_get(&tt_req_node->refcount);
hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
unlock:
spin_unlock_bh(&bat_priv->tt.req_list_lock);
out:
if (primary_if)
batadv_hardif_put(primary_if);
+
if (ret && tt_req_node) {
spin_lock_bh(&bat_priv->tt.req_list_lock);
- /* hlist_del_init() verifies tt_req_node still is in the list */
- hlist_del_init(&tt_req_node->list);
+ if (!hlist_unhashed(&tt_req_node->list)) {
+ hlist_del_init(&tt_req_node->list);
+ batadv_tt_req_node_put(tt_req_node);
+ }
spin_unlock_bh(&bat_priv->tt.req_list_lock);
- kfree(tt_req_node);
}
+
+ if (tt_req_node)
+ batadv_tt_req_node_put(tt_req_node);
+
kfree(tvlv_tt_data);
return ret;
}
if (!batadv_compare_eth(node->addr, resp_src))
continue;
hlist_del_init(&node->list);
- kfree(node);
+ batadv_tt_req_node_put(node);
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
u32 last_bcast_seqno;
struct hlist_head neigh_list;
- /* neigh_list_lock protects: neigh_list and router */
+ /* neigh_list_lock protects: neigh_list, ifinfo_list,
+ * last_bonding_candidate and router
+ */
spinlock_t neigh_list_lock;
struct hlist_node hash_entry;
struct batadv_priv *bat_priv;
* @addr: mac address of claimed non-mesh client
* @vid: vlan id this client was detected on
* @backbone_gw: pointer to backbone gw claiming this client
+ * @backbone_lock: lock protecting backbone_gw pointer
* @lasttime: last time we heard of claim (locals only)
* @hash_entry: hlist node for batadv_priv_bla::claim_hash
* @refcount: number of contexts the object is used
u8 addr[ETH_ALEN];
unsigned short vid;
struct batadv_bla_backbone_gw *backbone_gw;
+ spinlock_t backbone_lock; /* protects backbone_gw */
unsigned long lasttime;
struct hlist_node hash_entry;
struct rcu_head rcu;
* struct batadv_tt_req_node - data to keep track of the tt requests in flight
 * @addr: mac address of the originator this request was sent to
* @issued_at: timestamp used for purging stale tt requests
+ * @refcount: number of contexts the object is used by
* @list: list node for batadv_priv_tt::req_list
*/
struct batadv_tt_req_node {
u8 addr[ETH_ALEN];
unsigned long issued_at;
+ struct kref refcount;
struct hlist_node list;
};
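Pieced together from the hunks above, the intended reference lifetime of a tt_req_node looks roughly like this (a sketch only, error handling elided):

	kref_init(&tt_req_node->refcount);	/* requester's reference */

	kref_get(&tt_req_node->refcount);	/* reference held by tt.req_list */
	hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);

	/* ... whoever unhashes the node later drops the list's reference ... */
	hlist_del_init(&tt_req_node->list);
	batadv_tt_req_node_put(tt_req_node);

	batadv_tt_req_node_put(tt_req_node);	/* requester drops its reference */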
* change from under us.
*/
list_for_each_entry(v, &vg->vlan_list, vlist) {
+ if (!br_vlan_should_use(v))
+ continue;
f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
if (f && f->is_local && !f->dst)
fdb_delete_local(br, NULL, f);
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);
-/* note: already called with rcu_read_lock */
-static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static void __br_handle_local_finish(struct sk_buff *skb)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
u16 vid = 0;
/* check if vlan is allowed, to avoid spoofing */
if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+}
+
+/* note: already called with rcu_read_lock */
+static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ struct net_bridge_port *p = br_port_get_rcu(skb->dev);
+
+ __br_handle_local_finish(skb);
BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
br_pass_frame_up(skb);
if (p->br->stp_enabled == BR_NO_STP ||
fwd_mask & (1u << dest[5]))
goto forward;
- break;
+ *pskb = skb;
+ __br_handle_local_finish(skb);
+ return RX_HANDLER_PASS;
case 0x01: /* IEEE MAC (Pause) */
goto drop;
if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
&ip6h->saddr)) {
kfree_skb(skb);
+ br->has_ipv6_addr = 0;
return NULL;
}
+
+ br->has_ipv6_addr = 1;
ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
hopopt = (u8 *)(ip6h + 1);
br->ip6_other_query.delay_time = 0;
br->ip6_querier.port = NULL;
#endif
+ br->has_ipv6_addr = 1;
spin_lock_init(&br->multicast_lock);
setup_timer(&br->multicast_router_timer,
br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *))
{
- unsigned int mtu = ip_skb_dst_mtu(skb);
+ unsigned int mtu = ip_skb_dst_mtu(sk, skb);
struct iphdr *iph = ip_hdr(skb);
if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
struct bridge_vlan_xstats vxi;
struct br_vlan_stats stats;
- if (vl_idx++ < *prividx)
+ if (++vl_idx < *prividx)
continue;
memset(&vxi, 0, sizeof(vxi));
vxi.vid = v->vid;
u8 multicast_disabled:1;
u8 multicast_querier:1;
u8 multicast_query_use_ifaddr:1;
+ u8 has_ipv6_addr:1;
u32 hash_elasticity;
u32 hash_max;
static inline bool
__br_multicast_querier_exists(struct net_bridge *br,
- struct bridge_mcast_other_query *querier)
+ struct bridge_mcast_other_query *querier,
+ const bool is_ipv6)
{
+ bool own_querier_enabled;
+
+ if (br->multicast_querier) {
+ if (is_ipv6 && !br->has_ipv6_addr)
+ own_querier_enabled = false;
+ else
+ own_querier_enabled = true;
+ } else {
+ own_querier_enabled = false;
+ }
+
return time_is_before_jiffies(querier->delay_time) &&
- (br->multicast_querier || timer_pending(&querier->timer));
+ (own_querier_enabled || timer_pending(&querier->timer));
}
static inline bool br_multicast_querier_exists(struct net_bridge *br,
{
switch (eth->h_proto) {
case (htons(ETH_P_IP)):
- return __br_multicast_querier_exists(br, &br->ip4_other_query);
+ return __br_multicast_querier_exists(br,
+ &br->ip4_other_query, false);
#if IS_ENABLED(CONFIG_IPV6)
case (htons(ETH_P_IPV6)):
- return __br_multicast_querier_exists(br, &br->ip6_other_query);
+ return __br_multicast_querier_exists(br,
+ &br->ip6_other_query, true);
#endif
default:
return false;
const struct ceph_osd_request_target *t,
struct ceph_pg_pool_info *pi)
{
- bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
- bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
- ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+ bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+ bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+ ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
__pool_full(pi);
WARN_ON(pi->id != t->base_oloc.pool);
bool force_resend = false;
bool need_check_tiering = false;
bool need_resend = false;
- bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap,
- CEPH_OSDMAP_SORTBITWISE);
+ bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
enum calc_target_result ct_res;
int ret;
*/
msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
- dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__,
- req, req->r_t.target_oid.name_len, req->r_t.target_oid.name,
- req->r_t.target_oid.name_len, msg->front.iov_len, data_len);
+ dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
+ req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
+ msg->front.iov_len, data_len);
}
/*
verify_osdc_locked(osdc);
WARN_ON(!osdc->osdmap->epoch);
- if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
- ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
- ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+ if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
+ ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
+ ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
dout("%s osdc %p continuous\n", __func__, osdc);
continuous = true;
} else {
}
if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
- ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+ ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
dout("req %p pausewr\n", req);
req->r_t.paused = true;
maybe_request_map(osdc);
} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
- ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+ ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
dout("req %p pauserd\n", req);
req->r_t.paused = true;
maybe_request_map(osdc);
} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
!(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
CEPH_OSD_FLAG_FULL_FORCE)) &&
- (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+ (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
pool_full(osdc, req->r_t.base_oloc.pool))) {
dout("req %p full/pool_full\n", req);
pr_warn_ratelimited("FULL or reached pool quota\n");
struct ceph_osd_request *req = lreq->ping_req;
struct ceph_osd_req_op *op = &req->r_ops[0];
- if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+ if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
dout("%s PAUSERD\n", __func__);
return;
}
dout("req %p tid %llu cb\n", req, req->r_tid);
__complete_request(req);
}
+ if (m.flags & CEPH_OSD_FLAG_ONDISK)
+ complete_all(&req->r_safe_completion);
+ ceph_osdc_put_request(req);
} else {
if (req->r_unsafe_callback) {
dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
WARN_ON(1);
}
}
- if (m.flags & CEPH_OSD_FLAG_ONDISK)
- complete_all(&req->r_safe_completion);
- ceph_osdc_put_request(req);
return;
fail_request:
bool skipped_map = false;
bool was_full;
- was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+ was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
set_pool_was_full(osdc);
if (incremental)
osdc->osdmap = newmap;
}
- was_full &= !ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+ was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
need_resend, need_resend_linger);
if (ceph_check_fsid(osdc->client, &fsid) < 0)
goto bad;
- was_pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
- was_pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
- ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+ was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+ was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+ ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
have_pool_full(osdc);
/* incremental maps */
* we find out when we are no longer full and stop returning
* ENOSPC.
*/
- pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
- pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
- ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+ pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+ pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+ ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
have_pool_full(osdc);
if (was_pauserd || was_pausewr || pauserd || pausewr)
maybe_request_map(osdc);
return map;
}
+/*
+ * Encoding order is (new_up_client, new_state, new_weight). Need to
+ * apply in the (new_weight, new_state, new_up_client) order, because
+ * an incremental map may look like e.g.
+ *
+ * new_up_client: { osd=6, addr=... } # set osd_state and addr
+ * new_state: { osd=6, xorstate=EXISTS } # clear osd_state
+ */
+static int decode_new_up_state_weight(void **p, void *end,
+ struct ceph_osdmap *map)
+{
+ void *new_up_client;
+ void *new_state;
+ void *new_weight_end;
+ u32 len;
+
+ new_up_client = *p;
+ ceph_decode_32_safe(p, end, len, e_inval);
+ len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
+ ceph_decode_need(p, end, len, e_inval);
+ *p += len;
+
+ new_state = *p;
+ ceph_decode_32_safe(p, end, len, e_inval);
+ len *= sizeof(u32) + sizeof(u8);
+ ceph_decode_need(p, end, len, e_inval);
+ *p += len;
+
+ /* new_weight */
+ ceph_decode_32_safe(p, end, len, e_inval);
+ while (len--) {
+ s32 osd;
+ u32 w;
+
+ ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
+ osd = ceph_decode_32(p);
+ w = ceph_decode_32(p);
+ BUG_ON(osd >= map->max_osd);
+ pr_info("osd%d weight 0x%x %s\n", osd, w,
+ w == CEPH_OSD_IN ? "(in)" :
+ (w == CEPH_OSD_OUT ? "(out)" : ""));
+ map->osd_weight[osd] = w;
+
+ /*
+ * If we are marking in, set the EXISTS, and clear the
+ * AUTOOUT and NEW bits.
+ */
+ if (w) {
+ map->osd_state[osd] |= CEPH_OSD_EXISTS;
+ map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
+ CEPH_OSD_NEW);
+ }
+ }
+ new_weight_end = *p;
+
+ /* new_state (up/down) */
+ *p = new_state;
+ len = ceph_decode_32(p);
+ while (len--) {
+ s32 osd;
+ u8 xorstate;
+ int ret;
+
+ osd = ceph_decode_32(p);
+ xorstate = ceph_decode_8(p);
+ if (xorstate == 0)
+ xorstate = CEPH_OSD_UP;
+ BUG_ON(osd >= map->max_osd);
+ if ((map->osd_state[osd] & CEPH_OSD_UP) &&
+ (xorstate & CEPH_OSD_UP))
+ pr_info("osd%d down\n", osd);
+ if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
+ (xorstate & CEPH_OSD_EXISTS)) {
+ pr_info("osd%d does not exist\n", osd);
+ map->osd_weight[osd] = CEPH_OSD_IN;
+ ret = set_primary_affinity(map, osd,
+ CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
+ if (ret)
+ return ret;
+ memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
+ map->osd_state[osd] = 0;
+ } else {
+ map->osd_state[osd] ^= xorstate;
+ }
+ }
+
+ /* new_up_client */
+ *p = new_up_client;
+ len = ceph_decode_32(p);
+ while (len--) {
+ s32 osd;
+ struct ceph_entity_addr addr;
+
+ osd = ceph_decode_32(p);
+ ceph_decode_copy(p, &addr, sizeof(addr));
+ ceph_decode_addr(&addr);
+ BUG_ON(osd >= map->max_osd);
+ pr_info("osd%d up\n", osd);
+ map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
+ map->osd_addr[osd] = addr;
+ }
+
+ *p = new_weight_end;
+ return 0;
+
+e_inval:
+ return -EINVAL;
+}
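A minimal userspace sketch (constants assumed; not part of the patch) of the failure mode the reordering avoids: applied in encoding order, the two entries from the comment above leave the OSD "UP but !EXISTS", while the fixed order ends with EXISTS|UP.

#include <stdio.h>

#define EXISTS 0x1	/* stands in for CEPH_OSD_EXISTS */
#define UP     0x2	/* stands in for CEPH_OSD_UP */

int main(void)
{
	unsigned int enc = EXISTS;	/* osd6 exists, down */
	unsigned int fix = EXISTS;

	/* encoding order: new_up_client first, then new_state */
	enc |= EXISTS | UP;		/* marked up */
	enc ^= EXISTS;			/* xorstate=EXISTS: UP without EXISTS */

	/* fixed order: new_state (destroy) first, then new_up_client */
	fix = 0;			/* EXISTS cleared -> state reset */
	fix |= EXISTS | UP;		/* recreated and up */

	printf("encoding order 0x%x, fixed order 0x%x\n", enc, fix);
	return 0;			/* prints 0x2 vs 0x3 */
}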
+
/*
* decode and apply an incremental map update.
*/
__remove_pg_pool(&map->pg_pools, pi);
}
- /* new_up */
- ceph_decode_32_safe(p, end, len, e_inval);
- while (len--) {
- u32 osd;
- struct ceph_entity_addr addr;
- ceph_decode_32_safe(p, end, osd, e_inval);
- ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
- ceph_decode_addr(&addr);
- pr_info("osd%d up\n", osd);
- BUG_ON(osd >= map->max_osd);
- map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS;
- map->osd_addr[osd] = addr;
- }
-
- /* new_state */
- ceph_decode_32_safe(p, end, len, e_inval);
- while (len--) {
- u32 osd;
- u8 xorstate;
- ceph_decode_32_safe(p, end, osd, e_inval);
- xorstate = **(u8 **)p;
- (*p)++; /* clean flag */
- if (xorstate == 0)
- xorstate = CEPH_OSD_UP;
- if (xorstate & CEPH_OSD_UP)
- pr_info("osd%d down\n", osd);
- if (osd < map->max_osd)
- map->osd_state[osd] ^= xorstate;
- }
-
- /* new_weight */
- ceph_decode_32_safe(p, end, len, e_inval);
- while (len--) {
- u32 osd, off;
- ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
- osd = ceph_decode_32(p);
- off = ceph_decode_32(p);
- pr_info("osd%d weight 0x%x %s\n", osd, off,
- off == CEPH_OSD_IN ? "(in)" :
- (off == CEPH_OSD_OUT ? "(out)" : ""));
- if (osd < map->max_osd)
- map->osd_weight[osd] = off;
- }
+ /* new_up_client, new_state, new_weight */
+ err = decode_new_up_state_weight(p, end, map);
+ if (err)
+ goto bad;
/* new_pg_temp */
err = decode_new_pg_temp(p, end, map);
raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
oid->name_len);
- dout("%s %*pE -> raw_pgid %llu.%x\n", __func__, oid->name_len,
- oid->name, raw_pgid->pool, raw_pgid->seed);
+ dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
+ raw_pgid->pool, raw_pgid->seed);
return 0;
}
EXPORT_SYMBOL(ceph_object_locator_to_pg);
__scm_destroy(scm);
}
-static int do_set_attach_filter(struct socket *sock, int level, int optname,
- char __user *optval, unsigned int optlen)
+/* allocate a 64-bit sock_fprog on the user stack for duration of syscall. */
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval)
{
struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
__get_user(ptr, &fprog32->filter) ||
__put_user(len, &kfprog->len) ||
__put_user(compat_ptr(ptr), &kfprog->filter))
+ return NULL;
+
+ return kfprog;
+}
+EXPORT_SYMBOL_GPL(get_compat_bpf_fprog);
+
+static int do_set_attach_filter(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ struct sock_fprog __user *kfprog;
+
+ kfprog = get_compat_bpf_fprog(optval);
+ if (!kfprog)
return -EFAULT;
return sock_setsockopt(sock, level, optname, (char __user *)kfprog,
static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
- if (optname == SO_ATTACH_FILTER)
+ if (optname == SO_ATTACH_FILTER ||
+ optname == SO_ATTACH_REUSEPORT_CBPF)
return do_set_attach_filter(sock, level, optname,
optval, optlen);
if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
#include <net/sock_reuseport.h>
/**
- * sk_filter - run a packet through a socket filter
+ * sk_filter_trim_cap - run a packet through a socket filter
* @sk: sock associated with &sk_buff
* @skb: buffer to filter
+ * @cap: limit on how short the eBPF program may trim the packet
*
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
* be accepted or -EPERM if the packet should be tossed.
*
*/
-int sk_filter(struct sock *sk, struct sk_buff *skb)
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
int err;
struct sk_filter *filter;
filter = rcu_dereference(sk->sk_filter);
if (filter) {
unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
-
- err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
+ err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
}
rcu_read_unlock();
return err;
}
-EXPORT_SYMBOL(sk_filter);
+EXPORT_SYMBOL(sk_filter_trim_cap);
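Callers that do not need trimming presumably keep the old sk_filter() name; a minimal sketch of the wrapper this rename implies (the actual inline lives in a header, assumed here):

/* sketch: sk_filter() as a thin wrapper, never trimming below 1 byte */
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	return sk_filter_trim_cap(sk, skb, 1);
}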
static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
}
static bool sk_filter_is_valid_access(int off, int size,
- enum bpf_access_type type)
+ enum bpf_access_type type,
+ enum bpf_reg_type *reg_type)
{
switch (off) {
case offsetof(struct __sk_buff, tc_classid):
}
static bool tc_cls_act_is_valid_access(int off, int size,
- enum bpf_access_type type)
+ enum bpf_access_type type,
+ enum bpf_reg_type *reg_type)
{
if (type == BPF_WRITE) {
switch (off) {
return false;
}
}
+
+ switch (off) {
+ case offsetof(struct __sk_buff, data):
+ *reg_type = PTR_TO_PACKET;
+ break;
+ case offsetof(struct __sk_buff, data_end):
+ *reg_type = PTR_TO_PACKET_END;
+ break;
+ }
+
return __is_valid_access(off, size, type);
}
}
EXPORT_SYMBOL(make_flow_keys_digest);
+static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
+
+u32 __skb_get_hash_symmetric(struct sk_buff *skb)
+{
+ struct flow_keys keys;
+
+ __flow_hash_secret_init();
+
+ memset(&keys, 0, sizeof(keys));
+ __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
+ NULL, 0, 0, 0,
+ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+
+ return __flow_hash_from_keys(&keys, hashrnd);
+}
+EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
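The "symmetric" property means both directions of a flow hash to the same value, which packet fanout (later in this section) relies on. A standalone sketch of the idea using a canonicalized tuple and a toy hash (the kernel instead orders the dissected flow keys and runs jhash, but the effect is the same):

#include <stdio.h>
#include <stdint.h>

static uint32_t hash_tuple(uint32_t a1, uint32_t a2, uint16_t p1, uint16_t p2)
{
	if (a1 > a2 || (a1 == a2 && p1 > p2)) {	/* canonical order */
		uint32_t ta = a1; a1 = a2; a2 = ta;
		uint16_t tp = p1; p1 = p2; p2 = tp;
	}
	/* toy mix, stand-in for the kernel's jhash */
	return a1 * 2654435761u ^ a2 * 40503u ^ ((uint32_t)p1 << 16 | p2);
}

int main(void)
{
	/* 10.0.0.1:1234 <-> 10.0.0.2:80, both directions */
	printf("%u\n%u\n",
	       hash_tuple(0x0a000001, 0x0a000002, 1234, 80),
	       hash_tuple(0x0a000002, 0x0a000001, 80, 1234));
	return 0;	/* prints the same value twice */
}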
+
/**
* __skb_get_hash: calculate a flow hash
* @skb: sk_buff to calculate flow hash from
},
};
+static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
+ {
+ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+ .offset = offsetof(struct flow_keys, control),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_BASIC,
+ .offset = offsetof(struct flow_keys, basic),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v4addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v6addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_PORTS,
+ .offset = offsetof(struct flow_keys, ports),
+ },
+};
+
static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
{
.key_id = FLOW_DISSECTOR_KEY_CONTROL,
skb_flow_dissector_init(&flow_keys_dissector,
flow_keys_dissector_keys,
ARRAY_SIZE(flow_keys_dissector_keys));
+ skb_flow_dissector_init(&flow_keys_dissector_symmetric,
+ flow_keys_dissector_symmetric_keys,
+ ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
skb_flow_dissector_init(&flow_keys_buf_dissector,
flow_keys_buf_dissector_keys,
ARRAY_SIZE(flow_keys_buf_dissector_keys));
* @xstats_type: TLV type for backward compatibility xstats TLV
* @lock: statistics lock
* @d: dumping handle
+ * @padattr: padding attribute
*
* Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
* @type: TLV type for top level statistic TLV
* @lock: statistics lock
* @d: dumping handle
+ * @padattr: padding attribute
*
* Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
spin_lock_irqsave(&bm_pool->lock, flags);
if (bm_pool->buf_num == bm_pool->size) {
pr_warn("pool already filled\n");
+ spin_unlock_irqrestore(&bm_pool->lock, flags);
return bm_pool->buf_num;
}
if (buf_num + bm_pool->buf_num > bm_pool->size) {
pr_warn("cannot allocate %d buffers for pool\n",
buf_num);
+ spin_unlock_irqrestore(&bm_pool->lock, flags);
return 0;
}
if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) {
pr_warn("Adding %d buffers to the %d current buffers will overflow\n",
buf_num, bm_pool->buf_num);
+ spin_unlock_irqrestore(&bm_pool->lock, flags);
return 0;
}
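Each of the three fixes above adds the unlock that was missing on an early-return path; the same checks can also be funneled through a single unlock site. A sketch with a hypothetical helper (name and return codes assumed, not from the driver):

/* hypothetical: all early-exit checks behind one unlock site */
static int bm_pool_precheck(struct bm_pool *bm_pool, int buf_num)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&bm_pool->lock, flags);
	if (bm_pool->buf_num == bm_pool->size)
		ret = -EEXIST;		/* already filled */
	else if (buf_num + bm_pool->buf_num > bm_pool->size ||
		 buf_num + bm_pool->buf_num < bm_pool->buf_num)
		ret = -EINVAL;		/* too many, or counter overflow */
	spin_unlock_irqrestore(&bm_pool->lock, flags);
	return ret;
}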
tbl = neigh_tables[index];
if (!tbl)
goto out;
+ rcu_read_lock_bh();
neigh = __neigh_lookup_noref(tbl, addr, dev);
if (!neigh)
neigh = __neigh_create(tbl, addr, dev, false);
err = PTR_ERR(neigh);
- if (IS_ERR(neigh))
+ if (IS_ERR(neigh)) {
+ rcu_read_unlock_bh();
goto out_kfree_skb;
+ }
err = neigh->output(neigh, skb);
+ rcu_read_unlock_bh();
}
else if (index == NEIGH_LINK_TABLE) {
err = dev_hard_header(skb, dev, ntohs(skb->protocol),
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/of_net.h>
#include "net-sysfs.h"
hrtimer_set_expires(&t.timer, spin_until);
remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
- if (remaining <= 0) {
- pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
- return;
- }
+ if (remaining <= 0)
+ goto out;
start_time = ktime_get();
if (remaining < 100000) {
}
pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+out:
pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
+ destroy_hrtimer_on_stack(&t.timer);
}
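The added destroy_hrtimer_on_stack() pairs with the on-stack init so DEBUG_OBJECTS_TIMERS can drop its tracking entry; the general shape (a sketch, not the full pktgen spin loop):

struct hrtimer_sleeper t;

hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
hrtimer_init_sleeper(&t, current);
/* ... arm the timer, schedule(), cancel ... */
destroy_hrtimer_on_stack(&t.timer);	/* every *_init_on_stack needs this */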
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
}
EXPORT_SYMBOL_GPL(skb_append_pagefrags);
-/**
- * skb_push_rcsum - push skb and update receive checksum
- * @skb: buffer to update
- * @len: length of data pulled
- *
- * This function performs an skb_push on the packet and updates
- * the CHECKSUM_COMPLETE checksum. It should be used on
- * receive path processing instead of skb_push unless you know
- * that the checksum difference is zero (e.g., a valid IP header)
- * or you are setting ip_summed to CHECKSUM_NONE.
- */
-static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
-{
- skb_push(skb, len);
- skb_postpush_rcsum(skb, skb->data, len);
- return skb->data;
-}
-
/**
* skb_pull_rcsum - pull skb and update receive checksum
* @skb: buffer to update
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+ const int nested, unsigned int trim_cap)
{
int rc = NET_RX_SUCCESS;
- if (sk_filter(sk, skb))
+ if (sk_filter_trim_cap(sk, skb, trim_cap))
goto discard_and_relse;
skb->dev = NULL;
kfree_skb(skb);
goto out;
}
-EXPORT_SYMBOL(sk_receive_skb);
+EXPORT_SYMBOL(__sk_receive_skb);
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
sockc->tsflags |= tsflags;
break;
+ /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
+ case SCM_RIGHTS:
+ case SCM_CREDENTIALS:
+ break;
default:
return -EINVAL;
}
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
- __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
rxiph->daddr);
skb_dst_set(skb, dst_clone(dst));
+ local_bh_disable();
bh_lock_sock(ctl_sk);
err = ip_build_and_send_pkt(skb, ctl_sk,
rxiph->daddr, rxiph->saddr, NULL);
bh_unlock_sock(ctl_sk);
if (net_xmit_eval(err) == 0) {
- DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
- DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
+ __DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+ __DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
}
+ local_bh_enable();
out:
- dst_release(dst);
+ dst_release(dst);
}
static void dccp_v4_reqsk_destructor(struct request_sock *req)
goto discard_and_relse;
nf_reset(skb);
- return sk_receive_skb(sk, skb, 1);
+ return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);
no_dccp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
- return sk_receive_skb(sk, skb, 1) ? -1 : 0;
+ return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;
no_dccp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
#include <net/dn_fib.h>
#include <net/dn_neigh.h>
#include <net/dn_dev.h>
+#include <net/nexthop.h>
#define RT_MIN_TABLE 1
struct rtnexthop *nhp = nla_data(attr);
int nhs = 0, nhlen = nla_len(attr);
- while(nhlen >= (int)sizeof(struct rtnexthop)) {
- if ((nhlen -= nhp->rtnh_len) < 0)
- return 0;
+ while (rtnh_ok(nhp, nhlen)) {
nhs++;
- nhp = RTNH_NEXT(nhp);
+ nhp = rtnh_next(nhp, &nhlen);
}
- return nhs;
+ /* leftover implies invalid nexthop configuration, discard it */
+ return nhlen > 0 ? 0 : nhs;
}
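For reference, the include/net/nexthop.h helpers the rewritten loop leans on look roughly like this: rtnh_ok() rejects truncated or oversized entries, and rtnh_next() advances while shrinking the remaining byte count (paraphrased, not part of this patch):

static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
{
	return remaining >= (int)sizeof(*rtnh) &&
	       rtnh->rtnh_len >= sizeof(*rtnh) &&
	       rtnh->rtnh_len <= remaining;
}

static inline struct rtnexthop *rtnh_next(const struct rtnexthop *rtnh,
					  int *remaining)
{
	int totlen = NLA_ALIGN(rtnh->rtnh_len);

	*remaining -= totlen;
	return (struct rtnexthop *)((char *)rtnh + totlen);
}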
static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
int nhlen = nla_len(attr);
change_nexthops(fi) {
- int attrlen = nhlen - sizeof(struct rtnexthop);
- if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
+ int attrlen;
+
+ if (!rtnh_ok(nhp, nhlen))
return -EINVAL;
nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags;
nh->nh_oif = nhp->rtnh_ifindex;
nh->nh_weight = nhp->rtnh_hops + 1;
- if (attrlen) {
+ attrlen = rtnh_attrlen(nhp);
+ if (attrlen > 0) {
struct nlattr *gw_attr;
gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0;
}
- nhp = RTNH_NEXT(nhp);
+
+ nhp = rtnh_next(nhp, &nhlen);
} endfor_nexthops(fi);
return 0;
nl802154_dev_addr_policy))
return -EINVAL;
- if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] &&
- !attrs[NL802154_DEV_ADDR_ATTR_MODE] &&
+ if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
+ !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
!(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
return -EINVAL;
*/
net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+
+ /* Default values for sysctl-controlled parameters.
+ * We set them here, in case sysctl is not compiled.
+ */
+ net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
+ net->ipv4.sysctl_ip_dynaddr = 0;
+ net->ipv4.sysctl_ip_early_demux = 1;
+
return 0;
}
void *tmp;
};
+struct esp_output_extra {
+ __be32 seqhi;
+ u32 esphoff;
+};
+
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
*
* TODO: Use spare space in skb for this where possible.
*/
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
unsigned int len;
- len = seqhilen;
+ len = extralen;
len += crypto_aead_ivsize(aead);
return kmalloc(len, GFP_ATOMIC);
}
-static inline __be32 *esp_tmp_seqhi(void *tmp)
+static inline void *esp_tmp_extra(void *tmp)
{
- return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
+ return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}
-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
return crypto_aead_ivsize(aead) ?
- PTR_ALIGN((u8 *)tmp + seqhilen,
- crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
+ PTR_ALIGN((u8 *)tmp + extralen,
+ crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}
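A sketch of the scratch buffer these helpers carve up (layout inferred from the helpers themselves, not spelled out by the patch); with ESN disabled extralen is 0 and the IV sits at the front:

/*
 *   tmp --> [ esp_output_extra (seqhi + esphoff) ][ IV ][ aead req ][ sg[] ]
 *            ^ aligned for the struct              ^ aead alignmask applied
 */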
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
struct ip_esp_hdr *esph = (void *)(skb->data + offset);
void *tmp = ESP_SKB_CB(skb)->tmp;
- __be32 *seqhi = esp_tmp_seqhi(tmp);
+ __be32 *seqhi = esp_tmp_extra(tmp);
esph->seq_no = esph->spi;
esph->spi = *seqhi;
static void esp_output_restore_header(struct sk_buff *skb)
{
- esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
+ void *tmp = ESP_SKB_CB(skb)->tmp;
+ struct esp_output_extra *extra = esp_tmp_extra(tmp);
+
+ esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
+ sizeof(__be32));
}
static void esp_output_done_esn(struct crypto_async_request *base, int err)
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
+ struct esp_output_extra *extra;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct aead_request *req;
int tfclen;
int nfrags;
int assoclen;
- int seqhilen;
- __be32 *seqhi;
+ int extralen;
__be64 seqno;
/* skb is pure payload to encrypt */
nfrags = err;
assoclen = sizeof(*esph);
- seqhilen = 0;
+ extralen = 0;
if (x->props.flags & XFRM_STATE_ESN) {
- seqhilen += sizeof(__be32);
- assoclen += seqhilen;
+ extralen += sizeof(*extra);
+ assoclen += sizeof(__be32);
}
- tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
+ tmp = esp_alloc_tmp(aead, nfrags, extralen);
if (!tmp) {
err = -ENOMEM;
goto error;
}
- seqhi = esp_tmp_seqhi(tmp);
- iv = esp_tmp_iv(aead, tmp, seqhilen);
+ extra = esp_tmp_extra(tmp);
+ iv = esp_tmp_iv(aead, tmp, extralen);
req = esp_tmp_req(aead, iv);
sg = esp_req_sg(aead, req);
* encryption.
*/
if ((x->props.flags & XFRM_STATE_ESN)) {
- esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
- *seqhi = esph->spi;
+ extra->esphoff = (unsigned char *)esph -
+ skb_transport_header(skb);
+ esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
+ extra->seqhi = esph->spi;
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
aead_request_set_callback(req, 0, esp_output_done_esn, skb);
}
goto out;
ESP_SKB_CB(skb)->tmp = tmp;
- seqhi = esp_tmp_seqhi(tmp);
+ seqhi = esp_tmp_extra(tmp);
iv = esp_tmp_iv(aead, tmp, seqhilen);
req = esp_tmp_req(aead, iv);
sg = esp_req_sg(aead, req);
if (!rtnh_ok(rtnh, remaining))
return -EINVAL;
+ if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ return -EINVAL;
+
nexthop_nh->nh_flags =
(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
goto err_inval;
+ if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ goto err_inval;
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (cfg->fc_mp) {
nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
/* Fills in tpi and returns header length to be pulled. */
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
- bool *csum_err, __be16 proto)
+ bool *csum_err, __be16 proto, int nhs)
{
const struct gre_base_hdr *greh;
__be32 *options;
int hdr_len;
- if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+ if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr))))
return -EINVAL;
- greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ greh = (struct gre_base_hdr *)(skb->data + nhs);
if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
tpi->flags = gre_flags_to_tnl_flags(greh->flags);
hdr_len = gre_calc_hlen(tpi->flags);
- if (!pskb_may_pull(skb, hdr_len))
+ if (!pskb_may_pull(skb, nhs + hdr_len))
return -EINVAL;
- greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ greh = (struct gre_base_hdr *)(skb->data + nhs);
tpi->proto = greh->protocol;
options = (__be32 *)(greh + 1);
#include <net/gre.h>
#include <net/dst_metadata.h>
-#if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/ip6_fib.h>
-#include <net/ip6_route.h>
-#endif
-
/*
Problems & solutions
--------------------
* by themselves???
*/
+ const struct iphdr *iph = (struct iphdr *)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct tnl_ptk_info tpi;
bool csum_err = false;
- if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) {
+ if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
+ iph->ihl * 4) < 0) {
if (!csum_err) /* ignore csum errors. */
return;
}
}
#endif
- hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP));
+ hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
if (hdr_len < 0)
goto drop;
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
+ LIST_HEAD(list_kill);
struct ip_tunnel *t;
int err;
t->collect_md = true;
err = ipgre_newlink(net, dev, tb, NULL);
- if (err < 0)
- goto out;
+ if (err < 0) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
if (err)
goto out;
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0)
+ goto out;
+
return dev;
out:
- free_netdev(dev);
+ ip_tunnel_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
return dst_output(net, sk, skb);
}
#endif
- mtu = ip_skb_dst_mtu(skb);
+ mtu = ip_skb_dst_mtu(sk, skb);
if (skb_is_gso(skb))
return ip_finish_output_gso(net, sk, skb, mtu);
iph = ip_hdr(skb);
- mtu = ip_skb_dst_mtu(skb);
+ mtu = ip_skb_dst_mtu(sk, skb);
if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
mtu = IPCB(skb)->frag_max_size;
static __be32 ic_netmask = NONE; /* Netmask for local subnet */
__be32 ic_gateway = NONE; /* Gateway IP address */
-__be32 ic_addrservaddr = NONE; /* IP Address of the IP addresses'server */
+#ifdef IPCONFIG_DYNAMIC
+static __be32 ic_addrservaddr = NONE; /* IP address of the IP address server */
+#endif
__be32 ic_servaddr = NONE; /* Boot server IP address */
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
- if (c)
+ if (c) {
+ c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->mfc_un.res.minvif = MAXVIFS;
+ }
return c;
}
if (!net->ipv4.sysctl_local_reserved_ports)
goto err_ports;
- net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
- net->ipv4.sysctl_ip_dynaddr = 0;
- net->ipv4.sysctl_ip_early_demux = 1;
-
return 0;
err_ports:
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
/* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;
int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
return flag;
}
+static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
+ u32 *last_oow_ack_time)
+{
+ if (*last_oow_ack_time) {
+ s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+ if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+ NET_INC_STATS(net, mib_idx);
+ return true; /* rate-limited: don't send yet! */
+ }
+ }
+
+ *last_oow_ack_time = tcp_time_stamp;
+
+ return false; /* not rate-limited: go ahead, send dupack now! */
+}
+
/* Return true if we're currently rate-limiting out-of-window ACKs and
* thus shouldn't send a dupack right now. We rate-limit dupacks in
* response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
/* Data packets without SYNs are not likely part of an ACK loop. */
if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
!tcp_hdr(skb)->syn)
- goto not_rate_limited;
-
- if (*last_oow_ack_time) {
- s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
-
- if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
- NET_INC_STATS(net, mib_idx);
- return true; /* rate-limited: don't send yet! */
- }
- }
-
- *last_oow_ack_time = tcp_time_stamp;
+ return false;
-not_rate_limited:
- return false; /* not rate-limited: go ahead, send dupack now! */
+ return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
}
/* RFC 5961 7 [ACK Throttling] */
static u32 challenge_timestamp;
static unsigned int challenge_count;
struct tcp_sock *tp = tcp_sk(sk);
- u32 now;
+ u32 count, now;
/* First check our per-socket dupack rate limit. */
- if (tcp_oow_rate_limited(sock_net(sk), skb,
- LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
- &tp->last_oow_ack_time))
+ if (__tcp_oow_rate_limited(sock_net(sk),
+ LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+ &tp->last_oow_ack_time))
return;
- /* Then check the check host-wide RFC 5961 rate limit. */
+ /* Then check host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
if (now != challenge_timestamp) {
+ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
challenge_timestamp = now;
- challenge_count = 0;
+ WRITE_ONCE(challenge_count, half +
+ prandom_u32_max(sysctl_tcp_challenge_ack_limit));
}
- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+ count = READ_ONCE(challenge_count);
+ if (count > 0) {
+ WRITE_ONCE(challenge_count, count - 1);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}
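With the raised default limit, the host-wide per-second budget becomes a random draw rather than a deterministic counter, which is what makes the count unobservable to an off-path attacker. A quick sketch of the resulting range (prandom_u32_max(n) returns a value in [0, n)):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int limit = 1000;		/* new default */
	unsigned int half = (limit + 1) >> 1;	/* 500 */
	/* stand-in for prandom_u32_max(limit) */
	unsigned int count = half + (rand() % limit);

	printf("budget this second: %u\n", count);	/* uniform in [500, 1500) */
	return 0;
}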
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
struct sk_buff *hole = NULL;
- u32 last_lost;
+ u32 max_segs, last_lost;
int mib_idx;
int fwd_rexmitting = 0;
last_lost = tp->snd_una;
}
+ max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
tcp_for_write_queue_from(skb, sk) {
__u8 sacked = TCP_SKB_CB(skb)->sacked;
int segs;
segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
if (segs <= 0)
return;
+ /* In case tcp_shift_skb_data() has aggregated large skbs,
+ * we need to make sure we are not sending too-big TSO packets
+ */
+ segs = min_t(int, segs, max_segs);
if (fwd_rexmitting) {
begin_fwd:
return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}
-static inline int compute_score(struct sock *sk, struct net *net,
- __be32 saddr, unsigned short hnum, __be16 sport,
- __be32 daddr, __be16 dport, int dif)
+static int compute_score(struct sock *sk, struct net *net,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, unsigned short hnum, int dif)
{
int score;
struct inet_sock *inet;
return score;
}
-/*
- * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
- */
-static inline int compute_score2(struct sock *sk, struct net *net,
- __be32 saddr, __be16 sport,
- __be32 daddr, unsigned int hnum, int dif)
-{
- int score;
- struct inet_sock *inet;
-
- if (!net_eq(sock_net(sk), net) ||
- ipv6_only_sock(sk))
- return -1;
-
- inet = inet_sk(sk);
-
- if (inet->inet_rcv_saddr != daddr ||
- inet->inet_num != hnum)
- return -1;
-
- score = (sk->sk_family == PF_INET) ? 2 : 1;
-
- if (inet->inet_daddr) {
- if (inet->inet_daddr != saddr)
- return -1;
- score += 4;
- }
-
- if (inet->inet_dport) {
- if (inet->inet_dport != sport)
- return -1;
- score += 4;
- }
-
- if (sk->sk_bound_dev_if) {
- if (sk->sk_bound_dev_if != dif)
- return -1;
- score += 4;
- }
-
- if (sk->sk_incoming_cpu == raw_smp_processor_id())
- score++;
-
- return score;
-}
-
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
const __u16 lport, const __be32 faddr,
const __be16 fport)
udp_ehash_secret + net_hash_mix(net));
}
-/* called with read_rcu_lock() */
+/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum, int dif,
- struct udp_hslot *hslot2, unsigned int slot2,
+ struct udp_hslot *hslot2,
struct sk_buff *skb)
{
struct sock *sk, *result;
result = NULL;
badness = 0;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
- score = compute_score2(sk, net, saddr, sport,
+ score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
result = udp4_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
- hslot2, slot2, skb);
+ hslot2, skb);
if (!result) {
+ unsigned int old_slot2 = slot2;
hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
slot2 = hash2 & udptable->mask;
+ /* avoid searching the same slot again. */
+ if (unlikely(slot2 == old_slot2))
+ return result;
+
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp4_lib_lookup2(net, saddr, sport,
- htonl(INADDR_ANY), hnum, dif,
- hslot2, slot2, skb);
+ daddr, hnum, dif,
+ hslot2, skb);
}
return result;
}
result = NULL;
badness = 0;
sk_for_each_rcu(sk, &hslot->head) {
- score = compute_score(sk, net, saddr, hnum, sport,
- daddr, dport, dif);
+ score = compute_score(sk, net, saddr, sport,
+ daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
}
}
- if (rcu_access_pointer(sk->sk_filter)) {
- if (udp_lib_checksum_complete(skb))
+ if (rcu_access_pointer(sk->sk_filter) &&
+ udp_lib_checksum_complete(skb))
goto csum_error;
- if (sk_filter(sk, skb))
- goto drop;
- }
+
+ if (sk_filter(sk, skb))
+ goto drop;
+ if (unlikely(skb->len < sizeof(struct udphdr)))
+ goto drop;
udp_csum_pull_header(skb);
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
return err;
}
- return skb_checksum_init_zero_check(skb, proto, uh->check,
- inet_compute_pseudo);
+ /* Note, we are only interested in != 0 or == 0, thus the
+ * force to int.
+ */
+ return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+ inet_compute_pseudo);
}
/*
Saying M here will produce a module called ip6_gre. If unsure, say N.
+config IPV6_FOU
+ tristate
+ default NET_FOU && IPV6
+
+config IPV6_FOU_TUNNEL
+ tristate
+ default NET_FOU_IP_TUNNELS && IPV6_FOU
+ select IPV6_TUNNEL
+
config IPV6_MULTIPLE_TABLES
bool "IPv6: Multiple Routing Tables"
select FIB_RULES
obj-$(CONFIG_IPV6_SIT) += sit.o
obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
-obj-$(CONFIG_NET_FOU) += fou6.o
+obj-$(CONFIG_IPV6_FOU) += fou6.o
obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o
obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
}
EXPORT_SYMBOL(gue6_build_header);
-#ifdef CONFIG_NET_FOU_IP_TUNNELS
+#if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL)
static const struct ip6_tnl_encap_ops fou_ip6tun_ops = {
.encap_hlen = fou_encap_hlen,
if (!(type & ICMPV6_INFOMSG_MASK))
if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
- ping_err(skb, offset, info);
+ ping_err(skb, offset, ntohl(info));
}
static int icmpv6_rcv(struct sk_buff *skb);
* we accept a checksum of zero here. When we find the socket
* for the UDP packet we'll check if that socket allows zero checksum
* for IPv6 (set by socket option).
+ *
+ * Note, we are only interested in != 0 or == 0, thus the
+ * force to int.
*/
- return skb_checksum_init_zero_check(skb, proto, uh->check,
- ip6_compute_pseudo);
+ return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+ ip6_compute_pseudo);
}
EXPORT_SYMBOL(udp6_csum_init);
}
}
+ free_percpu(non_pcpu_rt->rt6i_pcpu);
non_pcpu_rt->rt6i_pcpu = NULL;
}
bool csum_err = false;
int hdr_len;
- hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6));
+ hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
if (hdr_len < 0)
goto drop;
fl6->daddr = p->raddr;
fl6->flowi6_oif = p->link;
fl6->flowlabel = 0;
+ fl6->flowi6_proto = IPPROTO_GRE;
if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
dev->hard_header_len = LL_MAX_HEADER + t_hlen;
dev->mtu = ETH_DATA_LEN - t_hlen;
+ if (dev->type == ARPHRD_ETHER)
+ dev->mtu -= ETH_HLEN;
if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
dev->mtu -= 8;
if (ret)
return ret;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
tunnel = netdev_priv(dev);
ip6gre_tnl_link_config(tunnel, 1);
dev->features |= NETIF_F_NETNS_LOCAL;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
}
static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
const struct in6_addr *final_dst)
{
struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
- int err;
dst = ip6_sk_dst_check(sk, dst, fl6);
+ if (!dst)
+ dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
- err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
- if (err)
- return ERR_PTR(err);
- if (final_dst)
- fl6->daddr = *final_dst;
-
- return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+ return dst;
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
if (!c)
return NULL;
+ c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->mfc_un.res.minvif = MAXMIFS;
return c;
}
fl6.daddr = *gw;
fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
(iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
+ fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
dst_release(dst);
};
struct fib6_table *table;
struct rt6_info *rt;
- int flags = 0;
+ int flags = RT6_LOOKUP_F_IFACE;
table = fib6_get_table(net, cfg->fc_table);
if (!table)
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
ipv4_update_pmtu(skb, dev_net(skb->dev), info,
- t->parms.link, 0, IPPROTO_IPV6, 0);
+ t->parms.link, 0, iph->protocol, 0);
err = 0;
goto out;
}
if (type == ICMP_REDIRECT) {
ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
- IPPROTO_IPV6, 0);
+ iph->protocol, 0);
err = 0;
goto out;
}
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
u32 ack, u32 win, u32 tsval, u32 tsecr,
int oif, struct tcp_md5sig_key *key, int rst,
- u8 tclass, u32 label)
+ u8 tclass, __be32 label)
{
const struct tcphdr *th = tcp_hdr(skb);
struct tcphdr *t1;
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
struct tcp_md5sig_key *key, u8 tclass,
- u32 label)
+ __be32 label)
{
tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
tclass, label);
destp = ntohs(inet->inet_dport);
srcp = ntohs(inet->inet_sport);
- if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
+ if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+ icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+ icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
udp_lib_rehash(sk, new_hash);
}
-static inline int compute_score(struct sock *sk, struct net *net,
- unsigned short hnum,
- const struct in6_addr *saddr, __be16 sport,
- const struct in6_addr *daddr, __be16 dport,
- int dif)
+static int compute_score(struct sock *sk, struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, unsigned short hnum,
+ int dif)
{
int score;
struct inet_sock *inet;
return score;
}
-static inline int compute_score2(struct sock *sk, struct net *net,
- const struct in6_addr *saddr, __be16 sport,
- const struct in6_addr *daddr,
- unsigned short hnum, int dif)
-{
- int score;
- struct inet_sock *inet;
-
- if (!net_eq(sock_net(sk), net) ||
- udp_sk(sk)->udp_port_hash != hnum ||
- sk->sk_family != PF_INET6)
- return -1;
-
- if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
- return -1;
-
- score = 0;
- inet = inet_sk(sk);
-
- if (inet->inet_dport) {
- if (inet->inet_dport != sport)
- return -1;
- score++;
- }
-
- if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
- if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
- return -1;
- score++;
- }
-
- if (sk->sk_bound_dev_if) {
- if (sk->sk_bound_dev_if != dif)
- return -1;
- score++;
- }
-
- if (sk->sk_incoming_cpu == raw_smp_processor_id())
- score++;
-
- return score;
-}
-
-/* called with read_rcu_lock() */
+/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned int hnum, int dif,
- struct udp_hslot *hslot2, unsigned int slot2,
+ struct udp_hslot *hslot2,
struct sk_buff *skb)
{
struct sock *sk, *result;
result = NULL;
badness = -1;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
- score = compute_score2(sk, net, saddr, sport,
+ score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
result = udp6_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
- hslot2, slot2, skb);
+ hslot2, skb);
if (!result) {
+ unsigned int old_slot2 = slot2;
hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
slot2 = hash2 & udptable->mask;
+ /* avoid searching the same slot again. */
+ if (unlikely(slot2 == old_slot2))
+ return result;
+
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
- &in6addr_any, hnum, dif,
- hslot2, slot2, skb);
+ daddr, hnum, dif,
+ hslot2, skb);
}
return result;
}
result = NULL;
badness = -1;
sk_for_each_rcu(sk, &hslot->head) {
- score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
+ score = compute_score(sk, net, saddr, sport, daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
}
}
- if (rcu_access_pointer(sk->sk_filter)) {
- if (udp_lib_checksum_complete(skb))
- goto csum_error;
- if (sk_filter(sk, skb))
- goto drop;
- }
+ if (rcu_access_pointer(sk->sk_filter) &&
+ udp_lib_checksum_complete(skb))
+ goto csum_error;
+
+ if (sk_filter(sk, skb))
+ goto drop;
+ if (unlikely(skb->len < sizeof(struct udphdr)))
+ goto drop;
udp_csum_pull_header(skb);
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
.open = kcm_seq_open,
.read = seq_read,
.llseek = seq_lseek,
+ .release = seq_release_net,
};
static struct kcm_seq_muxinfo kcm_seq_muxinfo = {
/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
tunnel->encap = encap;
if (encap == L2TP_ENCAPTYPE_UDP) {
- struct udp_tunnel_sock_cfg udp_cfg;
+ struct udp_tunnel_sock_cfg udp_cfg = { };
udp_cfg.sk_user_data = tunnel;
udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
*/
static int l2tp_ip6_recv(struct sk_buff *skb)
{
+ struct net *net = dev_net(skb->dev);
struct sock *sk;
u32 session_id;
u32 tunnel_id;
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_find(&init_net, NULL, session_id);
+ session = l2tp_session_find(net, NULL, session_id);
if (session == NULL)
goto discard;
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
- tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
if (tunnel != NULL)
sk = tunnel->sock;
else {
struct ipv6hdr *iph = ipv6_hdr(skb);
read_lock_bh(&l2tp_ip6_lock);
- sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
+ sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
0, tunnel_id);
read_unlock_bh(&l2tp_ip6_lock);
}
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
+ struct net *net = sock_net(sk);
__be32 v4addr = 0;
int addr_type;
int err;
err = -EADDRINUSE;
read_lock_bh(&l2tp_ip6_lock);
- if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr,
+ if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
sk->sk_bound_dev_if, addr->l2tp_conn_id))
goto out_in_use;
read_unlock_bh(&l2tp_ip6_lock);
return 0;
drop:
- IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
+ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
kfree_skb(skb);
return -1;
}
break;
case LAPB_FRMR:
- lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n",
+ lapb_dbg(1, "(%p) S3 RX FRMR(%d) %5ph\n",
lapb->dev, frame->pf,
- skb->data[0], skb->data[1], skb->data[2],
- skb->data[3], skb->data[4]);
+ skb->data);
lapb_establish_data_link(lapb);
lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev);
lapb_requeue_frames(lapb);
}
}
- lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n",
- lapb->dev, lapb->state,
- skb->data[0], skb->data[1], skb->data[2]);
+ lapb_dbg(2, "(%p) S%d TX %3ph\n", lapb->dev, lapb->state, skb->data);
if (!lapb_data_transmit(lapb, skb))
kfree_skb(skb);
{
frame->type = LAPB_ILLEGAL;
- lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n",
- lapb->dev, lapb->state,
- skb->data[0], skb->data[1], skb->data[2]);
+ lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data);
/* We always need to look at 2 bytes, sometimes we need
* to look at 3 and those cases are handled below.
dptr++;
*dptr++ = lapb->frmr_type;
- lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n",
+ lapb_dbg(1, "(%p) S%d TX FRMR %5ph\n",
lapb->dev, lapb->state,
- skb->data[1], skb->data[2], skb->data[3],
- skb->data[4], skb->data[5]);
+ &skb->data[1]);
} else {
dptr = skb_put(skb, 4);
*dptr++ = LAPB_FRMR;
dptr++;
*dptr++ = lapb->frmr_type;
- lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n",
- lapb->dev, lapb->state, skb->data[1],
- skb->data[2], skb->data[3]);
+ lapb_dbg(1, "(%p) S%d TX FRMR %3ph\n",
+ lapb->dev, lapb->state, &skb->data[1]);
}
lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE);
void mesh_sta_cleanup(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
- u32 changed;
+ u32 changed = 0;
/*
* maybe userspace handles peer allocation and peering, but in either
* case the beacon is still generated by the kernel and we might need
* an update.
*/
- changed = mesh_accept_plinks_update(sdata);
+ if (sdata->u.mesh.user_mpm &&
+ sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+ changed |= mesh_plink_dec_estab_count(sdata);
+ changed |= mesh_accept_plinks_update(sdata);
if (!sdata->u.mesh.user_mpm) {
changed |= mesh_plink_deactivate(sta);
del_timer_sync(&sta->mesh->plink_timer);
}
+ /* make sure no readers can access the nexthop sta from here on */
+ mesh_path_flush_by_nexthop(sta);
+ synchronize_net();
+
if (changed)
ieee80211_mbss_info_change_notify(sdata, changed);
}
u8 sa_offs, da_offs, pn_offs;
u8 band;
u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
- sizeof(rfc1042_header)];
+ sizeof(rfc1042_header)] __aligned(2);
struct rcu_head rcu_head;
};
* If available, return 1, otherwise invalidate this connection
* template and return 0.
*/
-int ip_vs_check_template(struct ip_vs_conn *ct)
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest)
{
struct ip_vs_dest *dest = ct->dest;
struct netns_ipvs *ipvs = ct->ipvs;
*/
if ((dest == NULL) ||
!(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
- expire_quiescent_template(ipvs, dest)) {
+ expire_quiescent_template(ipvs, dest) ||
+ (cdest && (dest != cdest))) {
IP_VS_DBG_BUF(9, "check_template: dest not available for "
"protocol %s s:%s:%d v:%s:%d "
"-> d:%s:%d\n",
/* Check if a template already exists */
ct = ip_vs_ct_in_get(¶m);
- if (!ct || !ip_vs_check_template(ct)) {
+ if (!ct || !ip_vs_check_template(ct, NULL)) {
struct ip_vs_scheduler *sched;
/*
vport, ¶m) < 0)
return NULL;
ct = ip_vs_ct_in_get(¶m);
- if (!ct) {
+ /* check if template exists and points to the same dest */
+ if (!ct || !ip_vs_check_template(ct, dest)) {
ct = ip_vs_conn_new(¶m, dest->af, daddr, dport,
IP_VS_CONN_F_TEMPLATE, dest, 0);
if (!ct) {
/*
* Set up receiving multicast socket over UDP
*/
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
+static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+ int ifindex)
{
/* multicast addr */
union ipvs_sockaddr mcast_addr;
set_sock_size(sock->sk, 0, result);
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+ sock->sk->sk_bound_dev_if = ifindex;
result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
if (result < 0) {
pr_err("Error binding to the multicast addr\n");
if (state == IP_VS_STATE_MASTER)
sock = make_send_sock(ipvs, id);
else
- sock = make_receive_sock(ipvs, id);
+ sock = make_receive_sock(ipvs, id, dev->ifindex);
if (IS_ERR(sock)) {
result = PTR_ERR(sock);
goto outtinfo;
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
if (l4proto->allow_clash &&
+ !nfct_nat(ct) &&
!nf_ct_is_dying(ct) &&
atomic_inc_not_zero(&ct->ct_general.use)) {
nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
nf_conntrack_tstamp_fini();
nf_conntrack_acct_fini();
nf_conntrack_expect_fini();
+
+ kmem_cache_destroy(nf_conntrack_cachep);
}
/*
unsigned int nr_slots, i;
size_t sz;
+ if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+ return NULL;
+
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+
+ if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+ return NULL;
+
sz = nr_slots * sizeof(struct hlist_nulls_head);
hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
get_order(sz));
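The two added bounds checks guard the roundup and the nr_slots * sizeof(struct hlist_nulls_head) multiplication; on a 32-bit kernel, where size_t is 32 bits, an oversized request would otherwise wrap to a tiny allocation. A standalone sketch of the wrap (4-byte list heads assumed, as on 32-bit):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t nr_slots = 0x40000001;	/* just above UINT_MAX / 4 */
	uint32_t sz = nr_slots * 4;	/* 32-bit size_t arithmetic: wraps */

	printf("%u slots -> alloc size wraps to %u bytes\n", nr_slots, sz);
	return 0;	/* prints "... 4 bytes" */
}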
if (ret) {
pr_err("failed to register helper for pf: %d port: %d\n",
ftp[i][j].tuple.src.l3num, ports[i]);
+ ports_c = i;
nf_conntrack_ftp_fini();
return ret;
}
int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
- int ret = 0;
- struct nf_conntrack_helper *cur;
+ struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
unsigned int h = helper_hash(&me->tuple);
+ struct nf_conntrack_helper *cur;
+ int ret = 0;
BUG_ON(me->expect_policy == NULL);
BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
mutex_lock(&nf_ct_helper_mutex);
hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
- if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
- cur->tuple.src.l3num == me->tuple.src.l3num &&
- cur->tuple.dst.protonum == me->tuple.dst.protonum) {
+ if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) {
ret = -EEXIST;
goto out;
}
if (ret) {
pr_err("failed to register helper for pf: %u port: %u\n",
irc[i].tuple.src.l3num, ports[i]);
+ ports_c = i;
nf_conntrack_irc_fini();
return ret;
}
if (ret) {
pr_err("failed to register helper for pf: %d port: %d\n",
sane[i][j].tuple.src.l3num, ports[i]);
+ ports_c = i;
nf_conntrack_sane_fini();
return ret;
}
if (ret) {
pr_err("failed to register helper for pf: %u port: %u\n",
sip[i][j].tuple.src.l3num, ports[i]);
+ ports_c = i;
nf_conntrack_sip_fini();
return ret;
}
{ }
};
-#define NET_NF_CONNTRACK_MAX 2089
-
static struct ctl_table nf_ct_netfilter_table[] = {
{
.procname = "nf_conntrack_max",
if (ret) {
pr_err("failed to register helper for pf: %u port: %u\n",
tftp[i][j].tuple.src.l3num, ports[i]);
+ ports_c = i;
nf_conntrack_tftp_fini();
return ret;
}
* Once the queue is registered it must reinject all packets it
* receives, no matter what.
*/
-static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
/* return EBUSY when somebody else is registered, return EEXIST if the
* same handler is registered, return 0 in case of success. */
-void nf_register_queue_handler(const struct nf_queue_handler *qh)
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
/* should never happen, we only have one queueing backend in kernel */
- WARN_ON(rcu_access_pointer(queue_handler));
- rcu_assign_pointer(queue_handler, qh);
+ WARN_ON(rcu_access_pointer(net->nf.queue_handler));
+ rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);
/* The caller must flush their queue before this */
-void nf_unregister_queue_handler(void)
+void nf_unregister_queue_handler(struct net *net)
{
- RCU_INIT_POINTER(queue_handler, NULL);
- synchronize_rcu();
+ RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
const struct nf_queue_handler *qh;
rcu_read_lock();
- qh = rcu_dereference(queue_handler);
+ qh = rcu_dereference(net->nf.queue_handler);
if (qh)
qh->nf_hook_drop(net, ops);
rcu_read_unlock();
struct nf_queue_entry *entry = NULL;
const struct nf_afinfo *afinfo;
const struct nf_queue_handler *qh;
+ struct net *net = state->net;
/* QUEUE == DROP if no one is waiting, to be safe. */
- qh = rcu_dereference(queue_handler);
+ qh = rcu_dereference(net->nf.queue_handler);
if (!qh) {
status = -ESRCH;
goto err;
err = nf_tables_newexpr(ctx, &info, expr);
if (err < 0)
- goto err2;
+ goto err3;
return expr;
+err3:
+ kfree(expr);
err2:
module_put(info.ops->type->owner);
err1:
/* Only accept unspec with dump */
if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
return -EAFNOSUPPORT;
+ if (!nla[NFTA_SET_TABLE])
+ return -EINVAL;
set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
if (IS_ERR(set))
* jumps are already validated for that chain.
*/
list_for_each_entry(i, &set->bindings, list) {
- if (binding->flags & NFT_SET_MAP &&
+ if (i->flags & NFT_SET_MAP &&
i->chain == binding->chain)
goto bind;
}
+ iter.genmask = nft_genmask_next(ctx->net);
iter.skip = 0;
iter.count = 0;
iter.err = 0;
iter.fn = nf_tables_bind_check_setelem;
set->ops->walk(ctx, set, &iter);
- if (iter.err < 0) {
- /* Destroy anonymous sets if binding fails */
- if (set->flags & NFT_SET_ANONYMOUS)
- nf_tables_set_destroy(ctx, set);
-
+ if (iter.err < 0)
return iter.err;
- }
}
bind:
binding->chain = ctx->chain;
if (nest == NULL)
goto nla_put_failure;
- args.cb = cb;
- args.skb = skb;
- args.iter.skip = cb->args[0];
- args.iter.count = 0;
- args.iter.err = 0;
- args.iter.fn = nf_tables_dump_setelem;
+ args.cb = cb;
+ args.skb = skb;
+ args.iter.genmask = nft_genmask_cur(ctx.net);
+ args.iter.skip = cb->args[0];
+ args.iter.count = 0;
+ args.iter.err = 0;
+ args.iter.fn = nf_tables_dump_setelem;
set->ops->walk(&ctx, set, &args.iter);
nla_nest_end(skb, nest);
binding->chain != chain)
continue;
+ iter.genmask = nft_genmask_next(ctx->net);
iter.skip = 0;
iter.count = 0;
iter.err = 0;
list_for_each_entry_continue_rcu(rule, &chain->rules, list) {
/* This rule is not active, skip. */
- if (unlikely(rule->genmask & (1 << gencursor)))
+ if (unlikely(rule->genmask & gencursor))
continue;
rulenum++;
if (entskb->tstamp.tv64) {
struct nfqnl_msg_packet_timestamp ts;
- struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
+ struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
ts.sec = cpu_to_be64(kts.tv_sec);
ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
net->nf.proc_netfilter, &nfqnl_file_ops))
return -ENOMEM;
#endif
+ nf_register_queue_handler(net, &nfqh);
return 0;
}
static void __net_exit nfnl_queue_net_exit(struct net *net)
{
+ nf_unregister_queue_handler(net);
#ifdef CONFIG_PROC_FS
remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
#endif
}
+static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
+{
+ synchronize_rcu();
+}
+
static struct pernet_operations nfnl_queue_net_ops = {
- .init = nfnl_queue_net_init,
- .exit = nfnl_queue_net_exit,
- .id = &nfnl_queue_net_id,
- .size = sizeof(struct nfnl_queue_net),
+ .init = nfnl_queue_net_init,
+ .exit = nfnl_queue_net_exit,
+ .exit_batch = nfnl_queue_net_exit_batch,
+ .id = &nfnl_queue_net_id,
+ .size = sizeof(struct nfnl_queue_net),
};
static int __init nfnetlink_queue_init(void)
}
register_netdevice_notifier(&nfqnl_dev_notifier);
- nf_register_queue_handler(&nfqh);
return status;
cleanup_netlink_notifier:
static void __exit nfnetlink_queue_fini(void)
{
- nf_unregister_queue_handler();
unregister_netdevice_notifier(&nfqnl_dev_notifier);
nfnetlink_subsys_unregister(&nfqnl_subsys);
netlink_unregister_notifier(&nfqnl_rtnl_notifier);
const struct nf_conn_help *help;
const struct nf_conntrack_tuple *tuple;
const struct nf_conntrack_helper *helper;
- long diff;
unsigned int state;
ct = nf_ct_get(pkt->skb, &ctinfo);
return;
#endif
case NFT_CT_EXPIRATION:
- diff = (long)jiffies - (long)ct->timeout.expires;
- if (diff < 0)
- diff = 0;
- *dest = jiffies_to_msecs(diff);
+ *dest = jiffies_to_msecs(nf_ct_expires(ct));
return;
case NFT_CT_HELPER:
if (ct->master == NULL)
struct nft_hash_elem *he;
struct rhashtable_iter hti;
struct nft_set_elem elem;
- u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
int err;
err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
goto cont;
if (nft_set_elem_expired(&he->ext))
goto cont;
- if (!nft_set_elem_active(&he->ext, genmask))
+ if (!nft_set_elem_active(&he->ext, iter->genmask))
goto cont;
elem.priv = he;
skb->pkt_type = value;
break;
case NFT_META_NFTRACE:
- skb->nf_trace = 1;
+ skb->nf_trace = !!value;
break;
default:
WARN_ON(1);
struct nft_rbtree_elem *rbe;
struct nft_set_elem elem;
struct rb_node *node;
- u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
spin_lock_bh(&nft_rbtree_lock);
for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
if (iter->count < iter->skip)
goto cont;
- if (!nft_set_elem_active(&rbe->ext, genmask))
+ if (!nft_set_elem_active(&rbe->ext, iter->genmask))
goto cont;
elem.priv = rbe;
return -EINVAL;
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
- target_offset + sizeof(struct compat_xt_standard_target) != next_offset)
+ COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
return -EINVAL;
/* compat_xt_entry match has less strict aligment requirements,
return -EINVAL;
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
- target_offset + sizeof(struct xt_standard_target) != next_offset)
+ XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
return -EINVAL;
return xt_check_entry_match(elems, base + target_offset,
return !!key->eth.type;
}
+static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
+ __be16 ethertype)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ __be16 diff[] = { ~(hdr->h_proto), ethertype };
+
+ skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+ ~skb->csum);
+ }
+
+ hdr->h_proto = ethertype;
+}
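Folding the { ~old, new } pair into a CHECKSUM_COMPLETE skb->csum is the classic RFC 1624 incremental update, HC' = ~(~HC + ~m + m'). A standalone sketch of the arithmetic on one 16-bit field (values hypothetical; the kernel reaches the same result via csum_partial() over the two-element diff array):

#include <stdio.h>
#include <stdint.h>

/* fold a 32-bit accumulator into a 16-bit one's-complement sum */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint16_t old_proto = 0x8847;	/* e.g. ETH_P_MPLS_UC */
	uint16_t new_proto = 0x0800;	/* e.g. ETH_P_IP */
	uint16_t csum = 0x1234;		/* some existing sum */

	/* RFC 1624 eqn 3: HC' = ~(~HC + ~m + m') */
	uint16_t updated = (uint16_t)~fold((uint16_t)~csum +
					   (uint16_t)~old_proto + new_proto);

	printf("0x%04x -> 0x%04x\n", csum, updated);
	return 0;
}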
+
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_action_push_mpls *mpls)
{
__be32 *new_mpls_lse;
- struct ethhdr *hdr;
/* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */
if (skb->encapsulation)
skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
- hdr = eth_hdr(skb);
- hdr->h_proto = mpls->mpls_ethertype;
-
+ update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
if (!skb->inner_protocol)
skb_set_inner_protocol(skb, skb->protocol);
skb->protocol = mpls->mpls_ethertype;
* field correctly in the presence of VLAN tags.
*/
hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
- hdr->h_proto = ethertype;
+ update_ethertype(skb, hdr, ethertype);
if (eth_p_mpls(skb->protocol))
skb->protocol = ethertype;
*/
state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
__ovs_ct_update_key(key, state, &info->zone, exp->master);
- } else
- return __ovs_ct_lookup(net, key, info, skb);
+ } else {
+ struct nf_conn *ct;
+ int err;
+
+ err = __ovs_ct_lookup(net, key, info, skb);
+ if (err)
+ return err;
+
+ ct = (struct nf_conn *)skb->nfct;
+ if (ct)
+ nf_ct_deliver_cached_events(ct);
+ }
return 0;
}
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
+#include <net/compat.h>
#include "internal.h"
struct sk_buff *skb,
unsigned int num)
{
- return reciprocal_scale(skb_get_hash(skb), num);
+ return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}
static unsigned int fanout_demux_lb(struct packet_fanout *f,
goto out_unlock;
}
- sockc.tsflags = 0;
+ sockc.tsflags = sk->sk_tsflags;
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
- if (unlikely(err)) {
- err = -EINVAL;
+ if (unlikely(err))
goto out_unlock;
- }
}
skb->protocol = proto;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
}
- sockc.tsflags = 0;
+ sockc.tsflags = po->sk.sk_tsflags;
if (msg->msg_controllen) {
err = sock_cmsg_send(&po->sk, msg, &sockc);
if (unlikely(err))
if (unlikely(!(dev->flags & IFF_UP)))
goto out_unlock;
- sockc.tsflags = 0;
+ sockc.tsflags = sk->sk_tsflags;
sockc.mark = sk->sk_mark;
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
}
+#ifdef CONFIG_COMPAT
+static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ struct packet_sock *po = pkt_sk(sock->sk);
+
+ if (level != SOL_PACKET)
+ return -ENOPROTOOPT;
+
+ if (optname == PACKET_FANOUT_DATA &&
+ po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
+ optval = (char __user *)get_compat_bpf_fprog(optval);
+ if (!optval)
+ return -EFAULT;
+ optlen = sizeof(struct sock_fprog);
+ }
+
+ return packet_setsockopt(sock, level, optname, optval, optlen);
+}
+#endif
+
static int packet_notifier(struct notifier_block *this,
unsigned long msg, void *ptr)
{
.shutdown = sock_no_shutdown,
.setsockopt = packet_setsockopt,
.getsockopt = packet_getsockopt,
+#ifdef CONFIG_COMPAT
+ .compat_setsockopt = compat_packet_setsockopt,
+#endif
.sendmsg = packet_sendmsg,
.recvmsg = packet_recvmsg,
.mmap = packet_mmap,
}
}
- if (conn->c_version < RDS_PROTOCOL(3,1)) {
+ if (conn->c_version < RDS_PROTOCOL(3, 1)) {
printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
" no longer supported\n",
&conn->c_faddr,
*/
static void rds_loop_inc_free(struct rds_incoming *inc)
{
- struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
- rds_message_put(rm);
+ struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
+
+ rds_message_put(rm);
}
/* we need to at least give the thread something to succeed */
RDS_CONN_CONNECTING,
RDS_CONN_DISCONNECTING,
RDS_CONN_UP,
+ RDS_CONN_RESETTING,
RDS_CONN_ERROR,
};
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
+void rds_connect_path_complete(struct rds_connection *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);
/* transport.c */
minfo.fport = inc->i_hdr.h_dport;
}
+ minfo.flags = 0;
+
rds_info_copy(iter, &minfo, sizeof(minfo));
}
list_splice_init(&conn->c_retrans, &conn->c_send_queue);
spin_unlock_irqrestore(&conn->c_lock, flags);
}
+EXPORT_SYMBOL_GPL(rds_send_reset);
static int acquire_in_xmit(struct rds_connection *conn)
{
rds_sysctl_reconnect_min = msecs_to_jiffies(1);
rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
- rds_sysctl_reg_table = register_net_sysctl(&init_net,"net/rds", rds_sysctl_rds_table);
+ rds_sysctl_reg_table =
+ register_net_sysctl(&init_net, "net/rds", rds_sysctl_rds_table);
if (!rds_sysctl_reg_table)
return -ENOMEM;
return 0;
}
/*
- * This is the only path that sets tc->t_sock. Send and receive trust that
- * it is set. The RDS_CONN_UP bit protects those paths from being
- * called while it isn't set.
+ * rds_tcp_reset_callbacks() switches to the new sock and
+ * releases the existing tc->t_sock.
+ *
+ * The only functions that set tc->t_sock are rds_tcp_set_callbacks
+ * and rds_tcp_reset_callbacks. Send and receive trust that
+ * it is set. The absence of the RDS_CONN_UP bit protects those paths
+ * from being called while it isn't set.
+ */
+void rds_tcp_reset_callbacks(struct socket *sock,
+ struct rds_connection *conn)
+{
+ struct rds_tcp_connection *tc = conn->c_transport_data;
+ struct socket *osock = tc->t_sock;
+
+ if (!osock)
+ goto newsock;
+
+ /* Need to resolve a duelling SYN between peers.
+ * We have an outstanding SYN to this peer, which may
+ * potentially have transitioned to the RDS_CONN_UP state,
+ * so we must quiesce any send threads before resetting
+ * c_transport_data. We quiesce these threads by setting
+ * c_state to something other than RDS_CONN_UP, and then
+ * waiting for any existing threads in rds_send_xmit to
+ * complete release_in_xmit(). (Subsequent threads entering
+ * rds_send_xmit() will bail on !rds_conn_up().)
+ *
+ * However, an incoming SYN-ACK at this point would end up
+ * marking the conn as RDS_CONN_UP, and would again permit
+ * rds_send_xmit() threads through, so ideally we would
+ * synchronize on RDS_CONN_UP after lock_sock(), but cannot
+ * do that: waiting on !RDS_IN_XMIT after lock_sock() may
+ * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
+ * would not get set. As a result, we set c_state to
+ * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
+ * cannot mark rds_conn_path_up() in the window before lock_sock().
+ */
+ atomic_set(&conn->c_state, RDS_CONN_RESETTING);
+ wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags));
+ lock_sock(osock->sk);
+ /* reset receive side state for rds_tcp_data_recv() for osock */
+ if (tc->t_tinc) {
+ rds_inc_put(&tc->t_tinc->ti_inc);
+ tc->t_tinc = NULL;
+ }
+ tc->t_tinc_hdr_rem = sizeof(struct rds_header);
+ tc->t_tinc_data_rem = 0;
+ tc->t_sock = NULL;
+
+ write_lock_bh(&osock->sk->sk_callback_lock);
+
+ osock->sk->sk_user_data = NULL;
+ osock->sk->sk_data_ready = tc->t_orig_data_ready;
+ osock->sk->sk_write_space = tc->t_orig_write_space;
+ osock->sk->sk_state_change = tc->t_orig_state_change;
+ write_unlock_bh(&osock->sk->sk_callback_lock);
+ release_sock(osock->sk);
+ sock_release(osock);
+newsock:
+ rds_send_reset(conn);
+ lock_sock(sock->sk);
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ tc->t_sock = sock;
+ sock->sk->sk_user_data = conn;
+ sock->sk->sk_data_ready = rds_tcp_data_ready;
+ sock->sk->sk_write_space = rds_tcp_write_space;
+ sock->sk->sk_state_change = rds_tcp_state_change;
+
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+ release_sock(sock->sk);
+}
+
+/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
+ * above rds_tcp_reset_callbacks for notes about synchronization
+ * with the data path.
*/
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
{
ret = rds_tcp_recv_init();
if (ret)
- goto out_slab;
+ goto out_pernet;
ret = rds_trans_register(&rds_tcp_transport);
if (ret)
out_recv:
rds_tcp_recv_exit();
-out_slab:
+out_pernet:
unregister_pernet_subsys(&rds_tcp_net_ops);
+out_slab:
kmem_cache_destroy(rds_tcp_conn_slab);
out:
return ret;
void rds_tcp_tune(struct socket *sock);
void rds_tcp_nonagle(struct socket *sock);
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn);
+void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn);
void rds_tcp_restore_callbacks(struct socket *sock,
struct rds_tcp_connection *tc);
u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
void rds_tcp_xmit_prepare(struct rds_connection *conn);
void rds_tcp_xmit_complete(struct rds_connection *conn);
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
- unsigned int hdr_off, unsigned int sg, unsigned int off);
+ unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_tcp_write_space(struct sock *sk);
/* tcp_stats.c */
rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
- switch(sk->sk_state) {
- /* ignore connecting sockets as they make progress */
- case TCP_SYN_SENT:
- case TCP_SYN_RECV:
- break;
- case TCP_ESTABLISHED:
- rds_connect_complete(conn);
- break;
- case TCP_CLOSE_WAIT:
- case TCP_CLOSE:
- rds_conn_drop(conn);
- default:
- break;
+ switch (sk->sk_state) {
+ /* ignore connecting sockets as they make progress */
+ case TCP_SYN_SENT:
+ case TCP_SYN_RECV:
+ break;
+ case TCP_ESTABLISHED:
+ rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+ break;
+ case TCP_CLOSE_WAIT:
+ case TCP_CLOSE:
+ rds_conn_drop(conn);
+ default:
+ break;
}
out:
read_unlock_bh(&sk->sk_callback_lock);
struct inet_sock *inet;
struct rds_tcp_connection *rs_tcp = NULL;
int conn_state;
- struct sock *nsk;
if (!sock) /* module unload or netns delete in progress */
return -ENETUNREACH;
!conn->c_outgoing) {
goto rst_nsk;
} else {
- atomic_set(&conn->c_state, RDS_CONN_CONNECTING);
- wait_event(conn->c_waitq,
- !test_bit(RDS_IN_XMIT, &conn->c_flags));
- rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+ rds_tcp_reset_callbacks(new_sock, conn);
conn->c_outgoing = 0;
+ /* rds_connect_path_complete() marks RDS_CONN_UP */
+ rds_connect_path_complete(conn, RDS_CONN_RESETTING);
}
+ } else {
+ rds_tcp_set_callbacks(new_sock, conn);
+ rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
}
- rds_tcp_set_callbacks(new_sock, conn);
- rds_connect_complete(conn); /* marks RDS_CONN_UP */
new_sock = NULL;
ret = 0;
goto out;
rst_nsk:
/* reset the newly returned accept sock and bail */
- nsk = new_sock->sk;
- rds_tcp_stats_inc(s_tcp_listen_closed_stale);
- nsk->sk_user_data = NULL;
- nsk->sk_prot->disconnect(nsk, 0);
- tcp_done(nsk);
- new_sock = NULL;
+ kernel_sock_shutdown(new_sock, SHUT_RDWR);
ret = 0;
out:
if (rs_tcp)
while (left) {
if (!tinc) {
tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
- arg->gfp);
+ arg->gfp);
if (!tinc) {
desc->error = -ENOMEM;
goto out;
static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
{
struct kvec vec = {
- .iov_base = data,
- .iov_len = len,
+ .iov_base = data,
+ .iov_len = len,
+ };
+ struct msghdr msg = {
+ .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
};
- struct msghdr msg = {
- .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
- };
return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
}
/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
- unsigned int hdr_off, unsigned int sg, unsigned int off)
+ unsigned int hdr_off, unsigned int sg, unsigned int off)
{
struct rds_tcp_connection *tc = conn->c_transport_data;
int done = 0;
tc->t_last_seen_una = rds_tcp_snd_una(tc);
rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
- if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+ if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
out:
struct workqueue_struct *rds_wq;
EXPORT_SYMBOL_GPL(rds_wq);
-void rds_connect_complete(struct rds_connection *conn)
+void rds_connect_path_complete(struct rds_connection *conn, int curr)
{
- if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) {
+ if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) {
printk(KERN_WARNING "%s: Cannot transition to state UP, "
"current state is %d\n",
__func__,
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
}
+EXPORT_SYMBOL_GPL(rds_connect_path_complete);
+
+void rds_connect_complete(struct rds_connection *conn)
+{
+ rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+}
EXPORT_SYMBOL_GPL(rds_connect_complete);
/*
rds_info_iter_unmap(iter);
down_read(&rds_trans_sem);
- for (i = 0; i < RDS_TRANS_COUNT; i++)
- {
+ for (i = 0; i < RDS_TRANS_COUNT; i++) {
trans = transports[i];
if (!trans || !trans->stats_info_copy)
continue;
rose_frames_acked(sk, nr);
if (ns == rose->vr) {
rose_start_idletimer(sk);
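+ /* apply the socket filter, trimming no further than ROSE_MIN_LEN, before queueing */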
- if (sock_queue_rcv_skb(sk, skb) == 0) {
+ if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
+ __sock_queue_rcv_skb(sk, skb) == 0) {
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
queued = 1;
} else {
/* pin the cipher we need so that the crypto layer doesn't invoke
* keventd to go get it */
rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(rxkad_ci))
- return PTR_ERR(rxkad_ci);
- return 0;
+ return PTR_ERR_OR_ZERO(rxkad_ci);
}
/*
nla_nest_end(skb, nest);
ret = skb->len;
} else
- nla_nest_cancel(skb, nest);
+ nlmsg_trim(skb, b);
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
if (NETLINK_CB(cb->skb).portid && ret)
}
EXPORT_SYMBOL_GPL(ife_get_meta_u16);
-int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
- mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL);
+ mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
if (!mi->metaval)
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
-int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
- mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL);
+ mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
if (!mi->metaval)
return -ENOMEM;
}
/* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for an existing action
*/
static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
- void *val, int len)
+ void *val, int len, bool exists)
{
struct tcf_meta_ops *ops = find_ife_oplist(metaid);
int ret = 0;
if (!ops) {
ret = -ENOENT;
#ifdef CONFIG_MODULES
- spin_unlock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
rtnl_unlock();
request_module("ifemeta%u", metaid);
rtnl_lock();
- spin_lock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_lock_bh(&ife->tcf_lock);
ops = find_ife_oplist(metaid);
#endif
}
}
/* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for an existing action
*/
static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
- int len)
+ int len, bool atomic)
{
struct tcf_meta_info *mi = NULL;
struct tcf_meta_ops *ops = find_ife_oplist(metaid);
if (!ops)
return -ENOENT;
- mi = kzalloc(sizeof(*mi), GFP_KERNEL);
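+ /* callers for an existing action hold ife->tcf_lock (a spinlock) and must not sleep */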
+ mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
if (!mi) {
/* put back what find_ife_oplist took */
module_put(ops->owner);
mi->metaid = metaid;
mi->ops = ops;
if (len > 0) {
- ret = ops->alloc(mi, metaval);
+ ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
if (ret != 0) {
kfree(mi);
module_put(ops->owner);
int rc = 0;
int installed = 0;
+ read_lock(&ife_mod_lock);
list_for_each_entry(o, &ifeoplist, list) {
- rc = add_metainfo(ife, o->metaid, NULL, 0);
+ rc = add_metainfo(ife, o->metaid, NULL, 0, true);
if (rc == 0)
installed += 1;
}
+ read_unlock(&ife_mod_lock);
if (installed)
return 0;
spin_unlock_bh(&ife->tcf_lock);
}
-/* under ife->tcf_lock */
-static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
+/* under ife->tcf_lock for existing action */
+static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+ bool exists)
{
int len = 0;
int rc = 0;
val = nla_data(tb[i]);
len = nla_len(tb[i]);
- rc = load_metaops_and_vet(ife, i, val, len);
+ rc = load_metaops_and_vet(ife, i, val, len, exists);
if (rc != 0)
return rc;
- rc = add_metainfo(ife, i, val, len);
+ rc = add_metainfo(ife, i, val, len, exists);
if (rc)
return rc;
}
saddr = nla_data(tb[TCA_IFE_SMAC]);
}
- spin_lock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_lock_bh(&ife->tcf_lock);
ife->tcf_action = parm->action;
if (parm->flags & IFE_ENCODE) {
if (ret == ACT_P_CREATED)
_tcf_ife_cleanup(a, bind);
- spin_unlock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
return err;
}
- err = populate_metalist(ife, tb2);
+ err = populate_metalist(ife, tb2, exists);
if (err)
goto metadata_parse_err;
if (ret == ACT_P_CREATED)
_tcf_ife_cleanup(a, bind);
- spin_unlock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
return err;
}
}
- spin_unlock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
if (ret == ACT_P_CREATED)
tcf_hash_insert(tn, a);
}
td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
- if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
+ if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+ if (exists)
+ tcf_hash_release(a, bind);
return -EINVAL;
+ }
- if (!tcf_hash_check(tn, index, a, bind)) {
+ if (!exists) {
ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
false);
if (ret)
if (!(at & AT_EGRESS)) {
if (m->tcfm_ok_push)
- skb_push(skb2, skb->mac_len);
+ skb_push_rcsum(skb2, skb->mac_len);
}
/* mirror is always swallowed */
bool peak_present;
};
#define to_police(pc) \
- container_of(pc, struct tcf_police, common)
+ container_of(pc->priv, struct tcf_police, common)
#define POL_TAB_MASK 15
struct nlattr *est, struct tc_action *a,
int ovr, int bind)
{
- unsigned int h;
int ret = 0, err;
struct nlattr *tb[TCA_POLICE_MAX + 1];
struct tc_police *parm;
struct tcf_police *police;
struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
struct tc_action_net *tn = net_generic(net, police_net_id);
- struct tcf_hashinfo *hinfo = tn->hinfo;
int size;
if (nla == NULL)
if (parm->index) {
if (tcf_hash_search(tn, a, parm->index)) {
- police = to_police(a->priv);
+ police = to_police(a);
if (bind) {
police->tcf_bindcnt += 1;
police->tcf_refcnt += 1;
/* not replacing */
return -EEXIST;
}
+ } else {
+ ret = tcf_hash_create(tn, parm->index, NULL, a,
+ sizeof(*police), bind, false);
+ if (ret)
+ return ret;
+ ret = ACT_P_CREATED;
}
- police = kzalloc(sizeof(*police), GFP_KERNEL);
- if (police == NULL)
- return -ENOMEM;
- ret = ACT_P_CREATED;
- police->tcf_refcnt = 1;
- spin_lock_init(&police->tcf_lock);
- if (bind)
- police->tcf_bindcnt = 1;
+ police = to_police(a);
override:
if (parm->rate.rate) {
err = -ENOMEM;
return ret;
police->tcfp_t_c = ktime_get_ns();
- police->tcf_index = parm->index ? parm->index :
- tcf_hash_new_index(tn);
- h = tcf_hash(police->tcf_index, POL_TAB_MASK);
- spin_lock_bh(&hinfo->lock);
- hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
- spin_unlock_bh(&hinfo->lock);
+ tcf_hash_insert(tn, a);
- a->priv = police;
return ret;
failure_unlock:
qdisc_put_rtab(P_tab);
qdisc_put_rtab(R_tab);
if (ret == ACT_P_CREATED)
- kfree(police);
+ tcf_hash_cleanup(a, est);
return err;
}
spin_lock(&police->tcf_lock);
bstats_update(&police->tcf_bstats, skb);
+ tcf_lastuse_update(&police->tcf_tm);
if (police->tcfp_ewma_rate &&
police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
.refcnt = police->tcf_refcnt - ref,
.bindcnt = police->tcf_bindcnt - bind,
};
+ struct tcf_t t;
if (police->rate_present)
psched_ratecfg_getrate(&opt.rate, &police->rate);
if (police->tcfp_ewma_rate &&
nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
goto nla_put_failure;
+
+ t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
+ t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
+ t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
+ if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
+ goto nla_put_failure;
+
return skb->len;
nla_put_failure:
struct tc_cls_flower_offload offload = {0};
struct tc_to_netdev tc;
- if (!tc_should_offload(dev, 0))
+ if (!tc_should_offload(dev, tp, 0))
return;
offload.command = TC_CLSFLOWER_DESTROY;
struct tc_cls_flower_offload offload = {0};
struct tc_to_netdev tc;
- if (!tc_should_offload(dev, flags))
+ if (!tc_should_offload(dev, tp, flags))
return;
offload.command = TC_CLSFLOWER_REPLACE;
struct tc_cls_flower_offload offload = {0};
struct tc_to_netdev tc;
- if (!tc_should_offload(dev, 0))
+ if (!tc_should_offload(dev, tp, 0))
return;
offload.command = TC_CLSFLOWER_STATS;
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
- if (tc_should_offload(dev, 0)) {
+ if (tc_should_offload(dev, tp, 0)) {
offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
offload.cls_u32->knode.handle = handle;
dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
struct tc_to_netdev offload;
int err;
+ if (!tc_should_offload(dev, tp, flags))
+ return tc_skip_sw(flags) ? -EINVAL : 0;
+
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
- if (tc_should_offload(dev, flags)) {
- offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
- offload.cls_u32->hnode.divisor = h->divisor;
- offload.cls_u32->hnode.handle = h->handle;
- offload.cls_u32->hnode.prio = h->prio;
+ offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
+ offload.cls_u32->hnode.divisor = h->divisor;
+ offload.cls_u32->hnode.handle = h->handle;
+ offload.cls_u32->hnode.prio = h->prio;
- err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
- tp->protocol, &offload);
- if (tc_skip_sw(flags))
- return err;
- }
+ err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+ tp->protocol, &offload);
+ if (tc_skip_sw(flags))
+ return err;
return 0;
}
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
- if (tc_should_offload(dev, 0)) {
+ if (tc_should_offload(dev, tp, 0)) {
offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
offload.cls_u32->hnode.divisor = h->divisor;
offload.cls_u32->hnode.handle = h->handle;
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
- if (tc_should_offload(dev, flags)) {
- offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
- offload.cls_u32->knode.handle = n->handle;
- offload.cls_u32->knode.fshift = n->fshift;
+ if (!tc_should_offload(dev, tp, flags))
+ return tc_skip_sw(flags) ? -EINVAL : 0;
+
+ offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
+ offload.cls_u32->knode.handle = n->handle;
+ offload.cls_u32->knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
- offload.cls_u32->knode.val = n->val;
- offload.cls_u32->knode.mask = n->mask;
+ offload.cls_u32->knode.val = n->val;
+ offload.cls_u32->knode.mask = n->mask;
#else
- offload.cls_u32->knode.val = 0;
- offload.cls_u32->knode.mask = 0;
+ offload.cls_u32->knode.val = 0;
+ offload.cls_u32->knode.mask = 0;
#endif
- offload.cls_u32->knode.sel = &n->sel;
- offload.cls_u32->knode.exts = &n->exts;
- if (n->ht_down)
- offload.cls_u32->knode.link_handle = n->ht_down->handle;
-
- err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
- tp->protocol, &offload);
- if (tc_skip_sw(flags))
- return err;
- }
+ offload.cls_u32->knode.sel = &n->sel;
+ offload.cls_u32->knode.exts = &n->exts;
+ if (n->ht_down)
+ offload.cls_u32->knode.link_handle = n->ht_down->handle;
+
+ err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+ tp->protocol, &offload);
+ if (tc_skip_sw(flags))
+ return err;
return 0;
}
if (tb[TCA_U32_FLAGS]) {
flags = nla_get_u32(tb[TCA_U32_FLAGS]);
if (!tc_flags_valid(flags))
- return err;
+ return -EINVAL;
}
n = (struct tc_u_knode *)*arg;
ht->divisor = divisor;
ht->handle = handle;
ht->prio = tp->prio;
+
+ err = u32_replace_hw_hnode(tp, ht, flags);
+ if (err) {
+ kfree(ht);
+ return err;
+ }
+
RCU_INIT_POINTER(ht->next, tp_c->hlist);
rcu_assign_pointer(tp_c->hlist, ht);
*arg = (unsigned long)ht;
- u32_replace_hw_hnode(tp, ht, flags);
return 0;
}
if (throttle)
qdisc_throttled(wd->qdisc);
+ if (wd->last_expires == expires)
+ return;
+
+ wd->last_expires = expires;
hrtimer_start(&wd->timer,
ns_to_ktime(expires),
HRTIMER_MODE_ABS_PINNED);
cl->deficit = cl->quantum;
}
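+ /* keep the qdisc's byte backlog in step with its q.qlen */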
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return err;
}
bstats_update(&cl->bstats, skb);
qdisc_bstats_update(sch, skb);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
}
if (cl->qdisc->ops->drop) {
len = cl->qdisc->ops->drop(cl->qdisc);
if (len > 0) {
+ sch->qstats.backlog -= len;
sch->q.qlen--;
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
qdisc_reset(cl->qdisc);
}
}
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
+ unsigned int prev_backlog;
+
if (likely(skb_queue_len(&sch->q) < sch->limit))
return qdisc_enqueue_tail(skb, sch);
+ prev_backlog = sch->qstats.backlog;
/* queue full, remove one skb to fulfill the limit */
__qdisc_queue_drop_head(sch, &sch->q);
qdisc_qstats_drop(sch);
qdisc_enqueue_tail(skb, sch);
+ qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
return NET_XMIT_CN;
}
unsigned int idx, prev_backlog, prev_qlen;
struct fq_codel_flow *flow;
int uninitialized_var(ret);
+ unsigned int pkt_len;
bool memory_limited;
idx = fq_codel_classify(skb, sch, &ret);
prev_backlog = sch->qstats.backlog;
prev_qlen = sch->q.qlen;
+ /* save this packet length as it might be dropped by fq_codel_drop() */
+ pkt_len = qdisc_pkt_len(skb);
/* fq_codel_drop() is quite expensive, as it performs a linear search
* in q->backlogs[] to find a fat flow.
* So instead of dropping a single packet, drop half of its backlog
*/
ret = fq_codel_drop(sch, q->drop_batch_size);
- q->drop_overlimit += prev_qlen - sch->q.qlen;
+ prev_qlen -= sch->q.qlen;
+ prev_backlog -= sch->qstats.backlog;
+ q->drop_overlimit += prev_qlen;
if (memory_limited)
- q->drop_overmemory += prev_qlen - sch->q.qlen;
- /* As we dropped packet(s), better let upper stack know this */
- qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
- prev_backlog - sch->qstats.backlog);
+ q->drop_overmemory += prev_qlen;
- return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+ /* As we dropped packet(s), better let the upper stack know this.
+ * If we dropped a packet for this flow, return NET_XMIT_CN,
+ * but in this case, our parents won't increase their backlogs.
+ */
+ if (ret == idx) {
+ qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+ prev_backlog - pkt_len);
+ return NET_XMIT_CN;
+ }
+ qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
+ return NET_XMIT_SUCCESS;
}
/* This is the specific function called from codel_dequeue()
qs.backlog = q->backlogs[idx];
qs.drops = flow->dropped;
}
- if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
+ if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
return -1;
if (idx < q->flows_cnt)
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
{
q->gso_skb = skb;
q->qstats.requeues++;
+ qdisc_qstats_backlog_inc(q, skb);
q->q.qlen++; /* it's still part of the queue */
__netif_schedule(q);
txq = skb_get_tx_queue(txq->dev, skb);
if (!netif_xmit_frozen_or_stopped(txq)) {
q->gso_skb = NULL;
+ qdisc_qstats_backlog_dec(q, skb);
q->q.qlen--;
} else
skb = NULL;
q->eligible = RB_ROOT;
INIT_LIST_HEAD(&q->droplist);
qdisc_watchdog_cancel(&q->watchdog);
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
struct hfsc_sched *q = qdisc_priv(sch);
unsigned char *b = skb_tail_pointer(skb);
struct tc_hfsc_qopt qopt;
- struct hfsc_class *cl;
- unsigned int i;
-
- sch->qstats.backlog = 0;
- for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
- sch->qstats.backlog += cl->qdisc->qstats.backlog;
- }
qopt.defcls = q->defcls;
if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
if (cl->qdisc->q.qlen == 1)
set_active(cl, qdisc_pkt_len(skb));
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
}
cl->qstats.drops++;
qdisc_qstats_drop(sch);
+ sch->qstats.backlog -= len;
sch->q.qlen--;
return len;
}
}
}
qdisc_qstats_overlimit(sch);
- if (likely(next_event > q->now)) {
- if (!test_bit(__QDISC_STATE_DEACTIVATED,
- &qdisc_root_sleeping(q->watchdog.qdisc)->state)) {
- ktime_t time = ns_to_ktime(next_event);
- qdisc_throttled(q->watchdog.qdisc);
- hrtimer_start(&q->watchdog.timer, time,
- HRTIMER_MODE_ABS_PINNED);
- }
- } else {
+ if (likely(next_event > q->now))
+ qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true);
+ else
schedule_work(&q->work);
- }
fin:
return skb;
}
struct htb_sched *q = container_of(work, struct htb_sched, work);
struct Qdisc *sch = q->watchdog.qdisc;
+ rcu_read_lock();
__netif_schedule(qdisc_root(sch));
+ rcu_read_unlock();
}
static int htb_init(struct Qdisc *sch, struct nlattr *opt)
if (!cl->level && cl->un.leaf.q)
qlen = cl->un.leaf.q->q.qlen;
- cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
- cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
+ cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
+ INT_MIN, INT_MAX);
+ cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
+ INT_MIN, INT_MAX);
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
return TC_H_MIN(classid) + 1;
}
+static bool ingress_cl_offload(u32 classid)
+{
+ return true;
+}
+
static unsigned long ingress_bind_filter(struct Qdisc *sch,
unsigned long parent, u32 classid)
{
.put = ingress_put,
.walk = ingress_walk,
.tcf_chain = ingress_find_tcf,
+ .tcf_cl_offload = ingress_cl_offload,
.bind_tcf = ingress_bind_filter,
.unbind_tcf = ingress_put,
};
}
}
+static bool clsact_cl_offload(u32 classid)
+{
+ return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
+}
+
static unsigned long clsact_bind_filter(struct Qdisc *sch,
unsigned long parent, u32 classid)
{
.put = ingress_put,
.walk = ingress_walk,
.tcf_chain = clsact_find_tcf,
+ .tcf_cl_offload = clsact_cl_offload,
.bind_tcf = clsact_bind_filter,
.unbind_tcf = ingress_put,
};
#endif
if (q->qdisc) {
+ unsigned int pkt_len = qdisc_pkt_len(skb);
int err = qdisc_enqueue(skb, q->qdisc);
- if (unlikely(err != NET_XMIT_SUCCESS)) {
- if (net_xmit_drop_count(err)) {
- qdisc_qstats_drop(sch);
- qdisc_tree_reduce_backlog(sch, 1,
- qdisc_pkt_len(skb));
- }
+ if (err != NET_XMIT_SUCCESS &&
+ net_xmit_drop_count(err)) {
+ qdisc_qstats_drop(sch);
+ qdisc_tree_reduce_backlog(sch, 1,
+ pkt_len);
}
goto tfifo_dequeue;
}
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
if (skb) {
qdisc_bstats_update(sch, skb);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
}
for (prio = q->bands-1; prio >= 0; prio--) {
qdisc = q->queues[prio];
if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
+ sch->qstats.backlog -= len;
sch->q.qlen--;
return len;
}
for (prio = 0; prio < q->bands; prio++)
qdisc_reset(q->queues[prio]);
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
{
struct prio_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *queues[TCQ_PRIO_BANDS];
+ int oldbands = q->bands, i;
struct tc_prio_qopt *qopt;
- int i;
if (nla_len(opt) < sizeof(*qopt))
return -EINVAL;
return -EINVAL;
}
+ /* Before commit, make sure we can allocate all new qdiscs */
+ for (i = oldbands; i < qopt->bands; i++) {
+ queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ TC_H_MAKE(sch->handle, i + 1));
+ if (!queues[i]) {
+ while (i > oldbands)
+ qdisc_destroy(queues[--i]);
+ return -ENOMEM;
+ }
+ }
+
sch_tree_lock(sch);
q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
- for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
+ for (i = q->bands; i < oldbands; i++) {
struct Qdisc *child = q->queues[i];
- q->queues[i] = &noop_qdisc;
- if (child != &noop_qdisc) {
- qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
- qdisc_destroy(child);
- }
- }
- sch_tree_unlock(sch);
- for (i = 0; i < q->bands; i++) {
- if (q->queues[i] == &noop_qdisc) {
- struct Qdisc *child, *old;
-
- child = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops,
- TC_H_MAKE(sch->handle, i + 1));
- if (child) {
- sch_tree_lock(sch);
- old = q->queues[i];
- q->queues[i] = child;
-
- if (old != &noop_qdisc) {
- qdisc_tree_reduce_backlog(old,
- old->q.qlen,
- old->qstats.backlog);
- qdisc_destroy(old);
- }
- sch_tree_unlock(sch);
- }
- }
+ qdisc_tree_reduce_backlog(child, child->q.qlen,
+ child->qstats.backlog);
+ qdisc_destroy(child);
}
+
+ for (i = oldbands; i < q->bands; i++)
+ q->queues[i] = queues[i];
+
+ sch_tree_unlock(sch);
return 0;
}
static int prio_init(struct Qdisc *sch, struct nlattr *opt)
{
- struct prio_sched_data *q = qdisc_priv(sch);
- int i;
-
- for (i = 0; i < TCQ_PRIO_BANDS; i++)
- q->queues[i] = &noop_qdisc;
-
- if (opt == NULL) {
+ if (!opt)
return -EINVAL;
- } else {
- int err;
- if ((err = prio_tune(sch, opt)) != 0)
- return err;
- }
- return 0;
+ return prio_tune(sch, opt);
}
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
err = qfq_change_agg(sch, cl, cl->agg->class_weight,
qdisc_pkt_len(skb));
- if (err)
- return err;
+ if (err) {
+ cl->qstats.drops++;
+ return qdisc_drop(skb, sch);
+ }
}
err = qdisc_enqueue(skb, cl->qdisc);
ret = qdisc_enqueue(skb, child);
if (likely(ret == NET_XMIT_SUCCESS)) {
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
skb = child->dequeue(child);
if (skb) {
qdisc_bstats_update(sch, skb);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
} else {
if (!red_is_idling(&q->vars))
if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
q->stats.other++;
qdisc_qstats_drop(sch);
+ sch->qstats.backlog -= len;
sch->q.qlen--;
return len;
}
struct red_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
red_restart(&q->vars);
}
return ret;
}
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
unsigned int len = 0;
if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
+ sch->qstats.backlog -= len;
sch->q.qlen--;
qdisc_qstats_drop(sch);
}
q->t_c = now;
q->tokens = toks;
q->ptokens = ptoks;
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
struct tbf_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
q->t_c = ktime_get_ns();
q->tokens = q->buffer;
struct sctp_ep_common *rcvr;
struct sctp_transport *transport = NULL;
struct sctp_chunk *chunk;
- struct sctphdr *sh;
union sctp_addr src;
union sctp_addr dest;
int family;
if (skb_linearize(skb))
goto discard_it;
- sh = sctp_hdr(skb);
-
/* Pull up the IP and SCTP headers. */
__skb_pull(skb, skb_transport_offset(skb));
if (skb->len < sizeof(struct sctphdr))
chunk->rcvr = rcvr;
/* Remember the SCTP header. */
- chunk->sctp_hdr = sh;
+ chunk->sctp_hdr = sctp_hdr(skb);
/* Set the source and destination addresses of the incoming chunk. */
sctp_init_addrs(chunk, &src, &dest);
#include <linux/sock_diag.h>
#include <net/sctp/sctp.h>
-extern void inet_diag_msg_common_fill(struct inet_diag_msg *r,
- struct sock *sk);
-extern int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
- struct inet_diag_msg *r, int ext,
- struct user_namespace *user_ns);
-
static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
void *info);
if (cb->args[4] < cb->args[1])
goto next;
+ if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs))
+ goto next;
+
if (r->sdiag_family != AF_UNSPEC &&
sk->sk_family != r->sdiag_family)
goto next;
info->sctpi_s_disable_fragments = sp->disable_fragments;
info->sctpi_s_v4mapped = sp->v4mapped;
info->sctpi_s_frag_interleave = sp->frag_interleave;
+ info->sctpi_s_type = sp->type;
return 0;
}
return ERR_PTR(err);
}
-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
struct rpc_xprt *xprt)
{
struct rpc_clnt *clnt = NULL;
struct rpc_xprt_switch *xps;
- xps = xprt_switch_alloc(xprt, GFP_KERNEL);
- if (xps == NULL)
- return ERR_PTR(-ENOMEM);
-
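+ /* reuse the back channel's existing xprt switch rather than allocating a new one */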
+ if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
+ WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+ xps = args->bc_xprt->xpt_bc_xps;
+ xprt_switch_get(xps);
+ } else {
+ xps = xprt_switch_alloc(xprt, GFP_KERNEL);
+ if (xps == NULL) {
+ xprt_put(xprt);
+ return ERR_PTR(-ENOMEM);
+ }
+ if (xprt->bc_xprt) {
+ xprt_switch_get(xps);
+ xprt->bc_xprt->xpt_bc_xps = xps;
+ }
+ }
clnt = rpc_new_client(args, xps, xprt, NULL);
if (IS_ERR(clnt))
return clnt;
return clnt;
}
-EXPORT_SYMBOL_GPL(rpc_create_xprt);
/**
* rpc_create - create an RPC client and transport with one call
};
char servername[48];
+ if (args->bc_xprt) {
+ WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+ xprt = args->bc_xprt->xpt_bc_xprt;
+ if (xprt) {
+ xprt_get(xprt);
+ return rpc_create_xprt(args, xprt);
+ }
+ }
+
if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
/* See comment on corresponding get in xs_setup_bc_tcp(): */
if (xprt->xpt_bc_xprt)
xprt_put(xprt->xpt_bc_xprt);
+ if (xprt->xpt_bc_xps)
+ xprt_switch_put(xprt->xpt_bc_xps);
xprt->xpt_ops->xpo_free(xprt);
module_put(owner);
}
return xprt;
args->bc_xprt->xpt_bc_xprt = NULL;
+ args->bc_xprt->xpt_bc_xps = NULL;
xprt_put(xprt);
ret = ERR_PTR(-EINVAL);
out_err:
return 0;
}
+/* tipc_bearer_reset_all - reset all links on all bearers
+ */
+void tipc_bearer_reset_all(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_bearer *b;
+ int i;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ b = rcu_dereference_rtnl(tn->bearer_list[i]);
+ if (b)
+ tipc_reset_bearer(net, b);
+ }
+}
+
/**
* bearer_disable
*
return 0;
/* Send RESET message even if bearer is detached from device */
- tipc_ptr = rtnl_dereference(dev->tipc_ptr);
+ tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
goto drop;
void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
struct tipc_media *tipc_media_find(const char *name);
+void tipc_bearer_reset_all(struct net *net);
int tipc_bearer_setup(void);
void tipc_bearer_cleanup(void);
void tipc_bearer_stop(struct net *net);
u16 ack = snd_l->snd_nxt - 1;
snd_l->ackers--;
+ rcv_l->bc_peer_is_up = true;
+ rcv_l->state = LINK_ESTABLISHED;
tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
tipc_link_reset(rcv_l);
rcv_l->state = LINK_RESET;
*/
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
- int mtyp, rc = 0;
+ int mtyp = 0;
+ int rc = 0;
bool state = false;
bool probe = false;
bool setup = false;
if (!msg_peer_node_is_up(hdr))
return;
- l->bc_peer_is_up = true;
+ /* Open when peer acknowledges our bcast init msg (pkt #1) */
+ if (msg_ack(hdr))
+ l->bc_peer_is_up = true;
+
+ if (!l->bc_peer_is_up)
+ return;
/* Ignore if peers_snd_nxt goes beyond receive window */
if (more(peers_snd_nxt, l->rcv_nxt + l->window))
#include "name_table.h"
#define MAX_FORWARD_SIZE 1024
+#define BUF_HEADROOM (LL_MAX_HEADER + 48)
+#define BUF_TAILROOM 16
static unsigned int align(unsigned int i)
{
msg_set_hdr_sz(hdr, BASIC_H_SIZE);
}
+ if (skb_cloned(_skb) &&
+ pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
+ goto exit;
+
/* Now reverse the concerned fields */
msg_set_errcode(hdr, err);
msg_set_origport(hdr, msg_destport(&ohdr));
#define TIPC_MEDIA_INFO_OFFSET 5
-/**
- * TIPC message buffer code
- *
- * TIPC message buffer headroom reserves space for the worst-case
- * link-level device header (in case the message is sent off-node).
- *
- * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
- * are word aligned for quicker access
- */
-#define BUF_HEADROOM (LL_MAX_HEADER + 48)
-
struct tipc_skb_cb {
void *handle;
struct sk_buff *tail;
struct nlattr **attrs)
{
struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1];
+ int err;
+
+ if (!attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
- nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER],
- NULL);
+ err = nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX,
+ attrs[TIPC_NLA_BEARER], NULL);
+ if (err)
+ return err;
return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME,
nla_data(bearer[TIPC_NLA_BEARER_NAME]),
struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
+ int err;
- nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
+ if (!attrs[TIPC_NLA_LINK])
+ return -EINVAL;
- nla_parse_nested(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP],
- NULL);
+ err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
+ NULL);
+ if (err)
+ return err;
+
+ if (!link[TIPC_NLA_LINK_PROP])
+ return -EINVAL;
- nla_parse_nested(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS],
- NULL);
+ err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX,
+ link[TIPC_NLA_LINK_PROP], NULL);
+ if (err)
+ return err;
+
+ if (!link[TIPC_NLA_LINK_STATS])
+ return -EINVAL;
+
+ err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX,
+ link[TIPC_NLA_LINK_STATS], NULL);
+ if (err)
+ return err;
name = (char *)TLV_DATA(msg->req);
if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
{
struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
struct tipc_link_info link_info;
+ int err;
- nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
+ if (!attrs[TIPC_NLA_LINK])
+ return -EINVAL;
+
+ err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
+ NULL);
+ if (err)
+ return err;
link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
- strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
+ nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
+ TIPC_MAX_LINK_NAME);
return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
&link_info, sizeof(link_info));
u32 node, depth, type, lowbound, upbound;
static const char * const scope_str[] = {"", " zone", " cluster",
" node"};
+ int err;
- nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
- attrs[TIPC_NLA_NAME_TABLE], NULL);
+ if (!attrs[TIPC_NLA_NAME_TABLE])
+ return -EINVAL;
- nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL],
- NULL);
+ err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
+ attrs[TIPC_NLA_NAME_TABLE], NULL);
+ if (err)
+ return err;
+
+ if (!nt[TIPC_NLA_NAME_TABLE_PUBL])
+ return -EINVAL;
+
+ err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX,
+ nt[TIPC_NLA_NAME_TABLE_PUBL], NULL);
+ if (err)
+ return err;
ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
{
u32 type, lower, upper;
struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
+ int err;
- nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL);
+ if (!attrs[TIPC_NLA_PUBL])
+ return -EINVAL;
+
+ err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL],
+ NULL);
+ if (err)
+ return err;
type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]);
lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]);
u32 sock_ref;
struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
- nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL);
+ if (!attrs[TIPC_NLA_SOCK])
+ return -EINVAL;
+
+ err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK],
+ NULL);
+ if (err)
+ return err;
sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
tipc_tlv_sprintf(msg->rep, "%u:", sock_ref);
struct nlattr **attrs)
{
struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1];
+ int err;
+
+ if (!attrs[TIPC_NLA_MEDIA])
+ return -EINVAL;
- nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
- NULL);
+ err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
+ NULL);
+ if (err)
+ return err;
return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME,
nla_data(media[TIPC_NLA_MEDIA_NAME]),
{
struct tipc_node_info node_info;
struct nlattr *node[TIPC_NLA_NODE_MAX + 1];
+ int err;
- nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL);
+ if (!attrs[TIPC_NLA_NODE])
+ return -EINVAL;
+
+ err = nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE],
+ NULL);
+ if (err)
+ return err;
node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR]));
node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP]));
{
__be32 id;
struct nlattr *net[TIPC_NLA_NET_MAX + 1];
+ int err;
+
+ if (!attrs[TIPC_NLA_NET])
+ return -EINVAL;
+
+ err = nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET],
+ NULL);
+ if (err)
+ return err;
- nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL);
id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID]));
return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id));
rc = tipc_bcast_rcv(net, be->link, skb);
- /* Broadcast link reset may happen at reassembly failure */
- if (rc & TIPC_LINK_DOWN_EVT)
- tipc_node_reset_links(n);
-
/* Broadcast ACKs are sent on a unicast link */
if (rc & TIPC_LINK_SND_BC_ACK) {
tipc_node_read_lock(n);
spin_unlock_bh(&be->inputq2.lock);
tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
}
+
+ if (rc & TIPC_LINK_DOWN_EVT) {
+ /* Reception reassembly failure => reset all links to peer */
+ if (!tipc_link_is_up(be->link))
+ tipc_node_reset_links(n);
+
+ /* Retransmission failure => reset all links to all peers */
+ if (!tipc_link_is_up(tipc_bc_sndlink(net)))
+ tipc_bearer_reset_all(net);
+ }
+
tipc_node_put(n);
}
* @tsk: receiving socket
* @skb: pointer to message buffer.
*/
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
struct sock *sk = &tsk->sk;
+ u32 onode = tsk_own_node(tsk);
struct tipc_msg *hdr = buf_msg(skb);
int mtyp = msg_type(hdr);
bool conn_cong;
if (mtyp == CONN_PROBE) {
msg_set_type(hdr, CONN_PROBE_REPLY);
- tipc_sk_respond(sk, skb, TIPC_OK);
+ if (tipc_msg_reverse(onode, &skb, TIPC_OK))
+ __skb_queue_tail(xmitq, skb);
return;
} else if (mtyp == CONN_ACK) {
conn_cong = tsk_conn_cong(tsk);
*
* Returns true if message was added to socket receive queue, otherwise false
*/
-static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
struct socket *sock = sk->sk_socket;
struct tipc_sock *tsk = tipc_sk(sk);
int usr = msg_user(hdr);
if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
- tipc_sk_proto_rcv(tsk, skb);
+ tipc_sk_proto_rcv(tsk, skb, xmitq);
return false;
}
return true;
reject:
- tipc_sk_respond(sk, skb, err);
+ if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
+ __skb_queue_tail(xmitq, skb);
return false;
}
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
unsigned int truesize = skb->truesize;
+ struct sk_buff_head xmitq;
+ u32 dnode, selector;
- if (likely(filter_rcv(sk, skb)))
+ __skb_queue_head_init(&xmitq);
+
+ if (likely(filter_rcv(sk, skb, &xmitq))) {
atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
+ return 0;
+ }
+
+ if (skb_queue_empty(&xmitq))
+ return 0;
+
+ /* Send response/rejected message */
+ skb = __skb_dequeue(&xmitq);
+ dnode = msg_destnode(buf_msg(skb));
+ selector = msg_origport(buf_msg(skb));
+ tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
return 0;
}
* Caller must hold socket lock
*/
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
- u32 dport)
+ u32 dport, struct sk_buff_head *xmitq)
{
+ unsigned long time_limit = jiffies + 2;
+ struct sk_buff *skb;
unsigned int lim;
atomic_t *dcnt;
- struct sk_buff *skb;
- unsigned long time_limit = jiffies + 2;
+ u32 onode;
while (skb_queue_len(inputq)) {
if (unlikely(time_after_eq(jiffies, time_limit)))
/* Add message directly to receive queue if possible */
if (!sock_owned_by_user(sk)) {
- filter_rcv(sk, skb);
+ filter_rcv(sk, skb, xmitq);
continue;
}
continue;
/* Overload => reject message back to sender */
- tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+ onode = tipc_own_addr(sock_net(sk));
+ if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
+ __skb_queue_tail(xmitq, skb);
break;
}
}
*/
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
+ struct sk_buff_head xmitq;
u32 dnode, dport = 0;
int err;
struct tipc_sock *tsk;
struct sock *sk;
struct sk_buff *skb;
+ __skb_queue_head_init(&xmitq);
while (skb_queue_len(inputq)) {
dport = tipc_skb_peek_port(inputq, dport);
tsk = tipc_sk_lookup(net, dport);
if (likely(tsk)) {
sk = &tsk->sk;
if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
- tipc_sk_enqueue(inputq, sk, dport);
+ tipc_sk_enqueue(inputq, sk, dport, &xmitq);
spin_unlock_bh(&sk->sk_lock.slock);
}
+ /* Send pending response/rejected messages, if any */
+ while ((skb = __skb_dequeue(&xmitq))) {
+ dnode = msg_destnode(buf_msg(skb));
+ tipc_node_xmit_skb(net, skb, dnode, dport);
+ }
sock_put(sk);
continue;
}
&unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
struct dentry *dentry = unix_sk(s)->path.dentry;
- if (dentry && d_backing_inode(dentry) == i) {
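+ /* compare the real inode so sockets bound through overlayfs are still found */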
+ if (dentry && d_real_inode(dentry) == i) {
sock_hold(s);
goto found;
}
err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
if (err)
goto fail;
- inode = d_backing_inode(path.dentry);
+ inode = d_real_inode(path.dentry);
err = inode_permission(inode, MAY_WRITE);
if (err)
goto put_fail;
goto out_up;
}
addr->hash = UNIX_HASH_SIZE;
- hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+ hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
spin_lock(&unix_table_lock);
u->path = u_path;
list = &unix_socket_table[hash];
* function will also cleanup rejected sockets, those that reach the connected
* state but leave it before they have been accepted.
*
+ * - Lock ordering for pending or accept queue sockets is:
+ *
+ * lock_sock(listener);
+ * lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
+ *
+ * Using explicit nested locking keeps lockdep happy since normally only one
+ * lock of a given class may be taken at a time.
+ *
* - Sockets created by user action will be cleaned up when the user process
* calls close(2), causing our release implementation to be called. Our release
* implementation will perform some cleanup then drop the last reference so our
cleanup = true;
lock_sock(listener);
- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (vsock_is_pending(sk)) {
vsock_remove_pending(listener, sk);
if (connected) {
listener->sk_ack_backlog--;
- lock_sock(connected);
+ lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
vconnected = vsock_sk(connected);
/* If the listener socket has received an error, then we should
WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
- WARN_ON(ops->set_tx_power && !ops->get_tx_power);
- WARN_ON(ops->set_antenna && !ops->get_antenna);
alloc_size = sizeof(*rdev) + sizeof_priv;
params.smps_mode = NL80211_SMPS_OFF;
}
+ params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+ if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
+ return -EOPNOTSUPP;
+
if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
params.acl = parse_acl_data(&rdev->wiphy, info);
if (IS_ERR(params.acl))
return PTR_ERR(params.acl);
}
- params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
- if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
- return -EOPNOTSUPP;
-
wdev_lock(wdev);
err = rdev_start_ap(rdev, dev, ¶ms);
if (!err) {
* replace EtherType */
hdrlen += ETH_ALEN + 2;
else
- tmp.h_proto = htons(skb->len);
+ tmp.h_proto = htons(skb->len - hdrlen);
pskb_pull(skb, hdrlen);
* alignment since sizeof(struct ethhdr) is 14.
*/
frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len);
+ if (!frame)
+ return NULL;
skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);
return private(dev, iwr, cmd, info, handler);
}
/* Old driver API : call driver ioctl handler */
- if (dev->netdev_ops->ndo_do_ioctl)
- return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+ if (dev->netdev_ops->ndo_do_ioctl) {
+#ifdef CONFIG_COMPAT
+ if (info->flags & IW_REQUEST_FLAG_COMPAT) {
+ int ret = 0;
+ struct iwreq iwr_lcl;
+ struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
+
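+ /* rebuild a native iwreq from the 32-bit iw_point, call the driver, then copy the results back in compat layout */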
+ memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
+ iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
+ iwr_lcl.u.data.length = iwp_compat->length;
+ iwr_lcl.u.data.flags = iwp_compat->flags;
+
+ ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
+
+ iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
+ iwp_compat->length = iwr_lcl.u.data.length;
+ iwp_compat->flags = iwr_lcl.u.data.flags;
+
+ return ret;
+ } else
+#endif
+ return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+ }
return -EOPNOTSUPP;
}
# Check for git id commit length and improperly formed commit descriptions
if ($in_commit_log && !$commit_log_possible_stack_dump &&
+ $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i &&
($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
($line =~ /\b[0-9a-f]{12,40}\b/i &&
$line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
*.pyc
*.pyo
+constants.py
$(CPP) -E -x c -P $(c_flags) $< > $@ ;\
sed -i '1,/<!-- end-c-headers -->/d;' $@
-$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in
- $(call if_changed,gen_constants_py)
+targets += constants.py
+$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in FORCE
+ $(call if_changed_dep,gen_constants_py)
build_constants_py: $(obj)/constants.py
+ @:
clean-files := *.pyc *.pyo $(if $(KBUILD_SRC),*.py) $(obj)/constants.py
#include <linux/fs.h>
#include <linux/mount.h>
-#include <linux/radix-tree.h>
/* We need to stringify expanded macros so that they can be parsed */
LX_VALUE(MNT_NOATIME)
LX_VALUE(MNT_NODIRATIME)
LX_VALUE(MNT_RELATIME)
-
-/* linux/radix-tree.h */
-LX_VALUE(RADIX_TREE_INDIRECT_PTR)
-LX_GDBPARSED(RADIX_TREE_HEIGHT_MASK)
-LX_GDBPARSED(RADIX_TREE_MAP_SHIFT)
-LX_GDBPARSED(RADIX_TREE_MAP_MASK)
+++ /dev/null
-#
-# gdb helper commands and functions for Linux kernel debugging
-#
-# Radix Tree Parser
-#
-# Copyright (c) 2016 Linaro Ltd
-#
-# Authors:
-# Kieran Bingham <kieran.bingham@linaro.org>
-#
-# This work is licensed under the terms of the GNU GPL version 2.
-#
-
-import gdb
-
-from linux import utils
-from linux import constants
-
-radix_tree_root_type = utils.CachedType("struct radix_tree_root")
-radix_tree_node_type = utils.CachedType("struct radix_tree_node")
-
-
-def is_indirect_ptr(node):
- long_type = utils.get_long_type()
- return (node.cast(long_type) & constants.LX_RADIX_TREE_INDIRECT_PTR)
-
-
-def indirect_to_ptr(node):
- long_type = utils.get_long_type()
- node_type = node.type
- indirect_ptr = node.cast(long_type) & ~constants.LX_RADIX_TREE_INDIRECT_PTR
- return indirect_ptr.cast(node_type)
-
-
-def maxindex(height):
- height = height & constants.LX_RADIX_TREE_HEIGHT_MASK
- return gdb.parse_and_eval("height_to_maxindex["+str(height)+"]")
-
-
-def lookup(root, index):
- if root.type == radix_tree_root_type.get_type().pointer():
- root = root.dereference()
- elif root.type != radix_tree_root_type.get_type():
- raise gdb.GdbError("Must be struct radix_tree_root not {}"
- .format(root.type))
-
- node = root['rnode']
- if node is 0:
- return None
-
- if not (is_indirect_ptr(node)):
- if (index > 0):
- return None
- return node
-
- node = indirect_to_ptr(node)
-
- height = node['path'] & constants.LX_RADIX_TREE_HEIGHT_MASK
- if (index > maxindex(height)):
- return None
-
- shift = (height-1) * constants.LX_RADIX_TREE_MAP_SHIFT
-
- while True:
- new_index = (index >> shift) & constants.LX_RADIX_TREE_MAP_MASK
- slot = node['slots'][new_index]
-
- node = slot.cast(node.type.pointer()).dereference()
- if node is 0:
- return None
-
- shift -= constants.LX_RADIX_TREE_MAP_SHIFT
- height -= 1
-
- if (height <= 0):
- break
-
- return node
-
-
-class LxRadixTree(gdb.Function):
- """ Lookup and return a node from a RadixTree.
-
-$lx_radix_tree_lookup(root_node [, index]): Return the node at the given index.
-If index is omitted, the root node is dereferenced and returned."""
-
- def __init__(self):
- super(LxRadixTree, self).__init__("lx_radix_tree_lookup")
-
- def invoke(self, root, index=0):
- result = lookup(root, index)
- if result is None:
- raise gdb.GdbError("No entry in tree at index {}".format(index))
-
- return result
-
-LxRadixTree()
saved_state['breakpoint'].enabled = saved_state['enabled']
def invoke(self, arg, from_tty):
- self.module_paths = arg.split()
+ self.module_paths = [os.path.expanduser(p) for p in arg.split()]
self.module_paths.append(os.getcwd())
# enforce update
import linux.lists
import linux.proc
import linux.constants
- import linux.radixtree
len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
(*type)[0] ? *type : "*");
- if (compatible[0])
+ if ((*compatible)[0])
sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
*compatible);
{
struct common_audit_data sa;
struct apparmor_audit_data aad = {0,};
- char *command, *args = value;
+ char *command, *largs = NULL, *args = value;
size_t arg_size;
int error;
if (size == 0)
return -EINVAL;
- /* args points to a PAGE_SIZE buffer, AppArmor requires that
- * the buffer must be null terminated or have size <= PAGE_SIZE -1
- * so that AppArmor can null terminate them
- */
- if (args[size - 1] != '\0') {
- if (size == PAGE_SIZE)
- return -EINVAL;
- args[size] = '\0';
- }
-
/* task can only write its own attributes */
if (current != task)
return -EACCES;
- args = value;
+ /* AppArmor currently requires that the buffer be null terminated */
+ if (args[size - 1] != '\0') {
+ /* null terminate */
+ largs = args = kmalloc(size + 1, GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+ memcpy(args, value, size);
+ args[size] = '\0';
+ }
+
+ error = -EINVAL;
args = strim(args);
command = strsep(&args, " ");
if (!args)
- return -EINVAL;
+ goto out;
args = skip_spaces(args);
if (!*args)
- return -EINVAL;
+ goto out;
arg_size = size - (args - (char *) value);
if (strcmp(name, "current") == 0) {
goto fail;
} else
/* only support the "current" and "exec" process attributes */
- return -EINVAL;
+ goto fail;
if (!error)
error = size;
+out:
+ kfree(largs);
return error;
fail:
aad.profile = aa_current_profile();
aad.op = OP_SETPROCATTR;
aad.info = name;
- aad.error = -EINVAL;
+ aad.error = error = -EINVAL;
aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
- return -EINVAL;
+ goto out;
}
static int apparmor_task_setrlimit(struct task_struct *task,
case KEYCTL_DH_COMPUTE:
return keyctl_dh_compute(compat_ptr(arg2), compat_ptr(arg3),
- arg4);
+ arg4, compat_ptr(arg5));
default:
return -EOPNOTSUPP;
}
long keyctl_dh_compute(struct keyctl_dh_params __user *params,
- char __user *buffer, size_t buflen)
+ char __user *buffer, size_t buflen,
+ void __user *reserved)
{
long ret;
MPI base, private, prime, result;
goto out;
}
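+ /* the reserved argument must currently be NULL */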
+ if (reserved) {
+ ret = -EINVAL;
+ goto out;
+ }
+
keylen = mpi_from_key(pcopy.prime, buflen, &prime);
if (keylen < 0 || !prime) {
/* buflen == 0 may be used to query the required buffer size,
#ifdef CONFIG_KEY_DH_OPERATIONS
extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
- size_t);
+ size_t, void __user *);
#else
static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params,
- char __user *buffer, size_t buflen)
+ char __user *buffer, size_t buflen,
+ void __user *reserved)
{
return -EOPNOTSUPP;
}
mutex_unlock(&key_construction_mutex);
- if (keyring)
+ if (keyring && link_ret == 0)
__key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
case KEYCTL_DH_COMPUTE:
return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2,
- (char __user *) arg3,
- (size_t) arg4);
+ (char __user *) arg3, (size_t) arg4,
+ (void __user *) arg5);
default:
return -EOPNOTSUPP;
struct snd_compr_stream stream;
};
+static void error_delayed_work(struct work_struct *work);
+
/*
* a note on stream states used:
* we use following states in the compressed core
snd_card_unref(compr->card);
return -ENOMEM;
}
+
+ INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);
+
data->stream.ops = compr->ops;
data->stream.direction = dirn;
data->stream.private_data = compr->private_data;
struct snd_compr_file *data = f->private_data;
struct snd_compr_runtime *runtime = data->stream.runtime;
+ cancel_delayed_work_sync(&data->stream.error_work);
+
switch (runtime->state) {
case SNDRV_PCM_STATE_RUNNING:
case SNDRV_PCM_STATE_DRAINING:
avail = snd_compr_calc_avail(stream, &ioctl_avail);
ioctl_avail.avail = avail;
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ return -EBADFD;
+ case SNDRV_PCM_STATE_XRUN:
+ return -EPIPE;
+ default:
+ break;
+ }
+
if (copy_to_user((__u64 __user *)arg,
&ioctl_avail, sizeof(ioctl_avail)))
return -EFAULT;
switch (stream->runtime->state) {
case SNDRV_PCM_STATE_OPEN:
case SNDRV_PCM_STATE_PREPARED:
- case SNDRV_PCM_STATE_XRUN:
case SNDRV_PCM_STATE_SUSPENDED:
case SNDRV_PCM_STATE_DISCONNECTED:
retval = -EBADFD;
goto out;
+ case SNDRV_PCM_STATE_XRUN:
+ retval = -EPIPE;
+ goto out;
}
avail = snd_compr_get_avail(stream);
stream = &data->stream;
mutex_lock(&stream->device->lock);
- if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
+
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_XRUN:
retval = snd_compr_get_poll(stream) | POLLERR;
goto out;
+ default:
+ break;
}
+
poll_wait(f, &stream->runtime->sleep, wait);
avail = snd_compr_get_avail(stream);
return retval;
}
+static void error_delayed_work(struct work_struct *work)
+{
+ struct snd_compr_stream *stream;
+
+ stream = container_of(work, struct snd_compr_stream, error_work.work);
+
+ mutex_lock(&stream->device->lock);
+
+ stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
+ wake_up(&stream->runtime->sleep);
+
+ mutex_unlock(&stream->device->lock);
+}
+
+/**
+ * snd_compr_stop_error: Report a fatal error on a stream
+ * @stream: pointer to stream
+ * @state: state to transition the stream to
+ *
+ * Stop the stream and set its state.
+ *
+ * Should be called with compressed device lock held.
+ */
+int snd_compr_stop_error(struct snd_compr_stream *stream,
+ snd_pcm_state_t state)
+{
+ if (stream->runtime->state == state)
+ return 0;
+
+ stream->runtime->state = state;
+
+ pr_debug("Changing state to: %d\n", state);
+
+ queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_compr_stop_error);
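+
+/*
+ * Hypothetical usage sketch: a driver that detects a fatal DSP error can
+ * report it with
+ *
+ *	snd_compr_stop_error(stream, SNDRV_PCM_STATE_XRUN);
+ *
+ * The trigger(STOP) and wake-up are then deferred to error_delayed_work()
+ * instead of being run synchronously by the caller.
+ */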
+
static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
{
int ret;
if (snd_BUG_ON(!card || !id))
return;
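+	/* skip notifications while the card is shutting down */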
+ if (card->shutdown)
+ return;
read_lock(&card->ctl_files_rwlock);
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
card->mixer_oss_change_count++;
}
EXPORT_SYMBOL(snd_pcm_new_internal);
+static void free_chmap(struct snd_pcm_str *pstr)
+{
+ if (pstr->chmap_kctl) {
+ snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl);
+ pstr->chmap_kctl = NULL;
+ }
+}
+
static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
{
struct snd_pcm_substream *substream, *substream_next;
kfree(setup);
}
#endif
+ free_chmap(pstr);
if (pstr->substream_count)
put_device(&pstr->dev);
}
for (cidx = 0; cidx < 2; cidx++) {
if (!pcm->internal)
snd_unregister_device(&pcm->streams[cidx].dev);
- if (pcm->streams[cidx].chmap_kctl) {
- snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl);
- pcm->streams[cidx].chmap_kctl = NULL;
- }
+ free_chmap(&pcm->streams[cidx]);
}
mutex_unlock(&pcm->open_mutex);
mutex_unlock(&register_mutex);
qhead = tu->qhead++;
tu->qhead %= tu->queue_size;
+ tu->qused--;
spin_unlock_irq(&tu->qlock);
if (tu->tread) {
}
spin_lock_irq(&tu->qlock);
- tu->qused--;
if (err < 0)
goto _error;
result += unit;
static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
{
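+	/* cancel the hrtimer first so it cannot re-schedule the tasklet */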
+ hrtimer_cancel(&dpcm->timer);
tasklet_kill(&dpcm->tasklet);
}
err = reg_raw_write(codec, reg, val);
if (err == -EAGAIN) {
err = snd_hdac_power_up_pm(codec);
- if (!err)
+ if (err >= 0)
err = reg_raw_write(codec, reg, val);
snd_hdac_power_down_pm(codec);
}
err = reg_raw_read(codec, reg, val, uncached);
if (err == -EAGAIN) {
err = snd_hdac_power_up_pm(codec);
- if (!err)
+ if (err >= 0)
err = reg_raw_read(codec, reg, val, uncached);
snd_hdac_power_down_pm(codec);
}
int page, p, pp, delta, i;
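+	/* the subbuf field must be shifted down before the mask is applied */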
page =
- (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
- WT_SUBBUF_MASK)
- >> WT_SUBBUF_SHIFT;
+ (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
+ >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
if (dma->nr_periods >= 4)
delta = (page - dma->period_real) & 3;
else {
u32 pipe_alloc_mask;
int err;
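+	/* size the backup from the comm page, not from an unrelated struct */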
- commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
+ commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
if (commpage_bak == NULL)
return -ENOMEM;
commpage = chip->comm_page;
- memcpy(commpage_bak, commpage, sizeof(struct comm_page));
+ memcpy(commpage_bak, commpage, sizeof(*commpage));
err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
if (err < 0) {
for (n = 0; n < spec->paths.used; n++) {
path = snd_array_elem(&spec->paths, n);
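+		/* a zero-depth path would make path->path[path->depth - 1] underflow */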
+ if (!path->depth)
+ continue;
if (path->path[0] == nid ||
path->path[path->depth - 1] == nid) {
bool pin_old = path->pin_enabled;
#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
+#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
+#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
+			  IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci))
static char *driver_short_names[] = {
[AZX_DRIVER_ICH] = "HDA Intel",
if (use_vga_switcheroo(hda)) {
if (chip->disabled && hda->probe_continued)
snd_hda_unlock_devices(&chip->bus);
- if (hda->vga_switcheroo_registered)
+ if (hda->vga_switcheroo_registered) {
vga_switcheroo_unregister_client(chip->pci);
+ vga_switcheroo_fini_domain_pm_ops(chip->card->dev);
+ }
}
if (bus->chip_init) {
/* Sunrise Point-LP */
{ PCI_DEVICE(0x8086, 0x9d70),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ /* Kabylake */
+ { PCI_DEVICE(0x8086, 0xa171),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ /* Kabylake-LP */
+ { PCI_DEVICE(0x8086, 0x9d71),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ /* Kabylake-H */
+ { PCI_DEVICE(0x8086, 0xa2f0),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
/* Broxton-P(Apollolake) */
{ PCI_DEVICE(0x8086, 0x5a98),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0x157a),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ { PCI_DEVICE(0x1002, 0x15b3),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0x793b),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0x7919),
/*
* Register access ops. Tegra HDA register access is DWORD only.
*/
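+/* The __iomem annotations below let sparse type-check the MMIO accessors. */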
-static void hda_tegra_writel(u32 value, u32 *addr)
+static void hda_tegra_writel(u32 value, u32 __iomem *addr)
{
writel(value, addr);
}
-static u32 hda_tegra_readl(u32 *addr)
+static u32 hda_tegra_readl(u32 __iomem *addr)
{
return readl(addr);
}
-static void hda_tegra_writew(u16 value, u16 *addr)
+static void hda_tegra_writew(u16 value, u16 __iomem *addr)
{
unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
- void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+ void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
u32 v;
v = readl(dword_addr);
writel(v, dword_addr);
}
-static u16 hda_tegra_readw(u16 *addr)
+static u16 hda_tegra_readw(u16 __iomem *addr)
{
unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
- void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+ void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
u32 v;
v = readl(dword_addr);
return (v >> shift) & 0xffff;
}
-static void hda_tegra_writeb(u8 value, u8 *addr)
+static void hda_tegra_writeb(u8 value, u8 __iomem *addr)
{
unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
- void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+ void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
u32 v;
v = readl(dword_addr);
writel(v, dword_addr);
}
-static u8 hda_tegra_readb(u8 *addr)
+static u8 hda_tegra_readb(u8 __iomem *addr)
{
unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
- void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+ void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
u32 v;
v = readl(dword_addr);
case 0x10ec0234:
case 0x10ec0274:
case 0x10ec0294:
+ case 0x10ec0700:
+ case 0x10ec0701:
+ case 0x10ec0703:
alc_update_coef_idx(codec, 0x10, 1<<15, 0);
break;
case 0x10ec0662:
ALC269_TYPE_ALC256,
ALC269_TYPE_ALC225,
ALC269_TYPE_ALC294,
+ ALC269_TYPE_ALC700,
};
/*
case ALC269_TYPE_ALC256:
case ALC269_TYPE_ALC225:
case ALC269_TYPE_ALC294:
+ case ALC269_TYPE_ALC700:
ssids = alc269_ssids;
break;
default:
static void alc_headset_mode_unplugged(struct hda_codec *codec)
{
static struct coef_fw coef0255[] = {
- WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
{}
};
+ static struct coef_fw coef0255_1[] = {
+ WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
+ {}
+ };
+ static struct coef_fw coef0256[] = {
+ WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
+ {}
+ };
static struct coef_fw coef0233[] = {
WRITE_COEF(0x1b, 0x0c0b),
WRITE_COEF(0x45, 0xc429),
switch (codec->core.vendor_id) {
case 0x10ec0255:
+ alc_process_coef_fw(codec, coef0255_1);
+ alc_process_coef_fw(codec, coef0255);
+ break;
case 0x10ec0256:
+ alc_process_coef_fw(codec, coef0256);
alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0233:
WRITE_COEFEX(0x57, 0x03, 0x8ea6),
{}
};
+ static struct coef_fw coef0256[] = {
+ WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
+ WRITE_COEF(0x1b, 0x0c6b),
+ WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ {}
+ };
static struct coef_fw coef0233[] = {
WRITE_COEF(0x45, 0xd429),
WRITE_COEF(0x1b, 0x0c2b),
switch (codec->core.vendor_id) {
case 0x10ec0255:
- case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0256:
+ alc_process_coef_fw(codec, coef0256);
+ break;
case 0x10ec0233:
case 0x10ec0283:
alc_process_coef_fw(codec, coef0233);
WRITE_COEFEX(0x57, 0x03, 0x8ea6),
{}
};
+ static struct coef_fw coef0256[] = {
+ WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
+ WRITE_COEF(0x1b, 0x0c6b),
+ WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ {}
+ };
static struct coef_fw coef0233[] = {
WRITE_COEF(0x45, 0xe429),
WRITE_COEF(0x1b, 0x0c2b),
switch (codec->core.vendor_id) {
case 0x10ec0255:
- case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0256:
+ alc_process_coef_fw(codec, coef0256);
+ break;
case 0x10ec0233:
case 0x10ec0283:
alc_process_coef_fw(codec, coef0233);
static void alc255_set_default_jack_type(struct hda_codec *codec)
{
/* Set to iphone type */
- static struct coef_fw fw[] = {
+ static struct coef_fw alc255fw[] = {
WRITE_COEF(0x1b, 0x880b),
WRITE_COEF(0x45, 0xd089),
WRITE_COEF(0x1b, 0x080b),
WRITE_COEF(0x1b, 0x0c0b),
{}
};
- alc_process_coef_fw(codec, fw);
+ static struct coef_fw alc256fw[] = {
+ WRITE_COEF(0x1b, 0x884b),
+ WRITE_COEF(0x45, 0xd089),
+ WRITE_COEF(0x1b, 0x084b),
+ WRITE_COEF(0x46, 0x0004),
+ WRITE_COEF(0x1b, 0x0c4b),
+ {}
+ };
+ switch (codec->core.vendor_id) {
+ case 0x10ec0255:
+ alc_process_coef_fw(codec, alc255fw);
+ break;
+ case 0x10ec0256:
+ alc_process_coef_fw(codec, alc256fw);
+ break;
+ }
msleep(30);
}
SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
+ SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
+ SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
{}
};
#define ALC225_STANDARD_PINS \
- {0x12, 0xb7a60130}, \
{0x21, 0x04211020}
#define ALC256_STANDARD_PINS \
static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC225_STANDARD_PINS,
+ {0x12, 0xb7a60130},
+ {0x14, 0x901701a0}),
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC225_STANDARD_PINS,
+ {0x12, 0xb7a60130},
+ {0x14, 0x901701b0}),
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC225_STANDARD_PINS,
+ {0x12, 0xb7a60150},
{0x14, 0x901701a0}),
SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC225_STANDARD_PINS,
+ {0x12, 0xb7a60150},
{0x14, 0x901701b0}),
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC225_STANDARD_PINS,
+ {0x12, 0xb7a60130},
+ {0x1b, 0x90170110}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x02211020}),
{0x12, 0x90a60180},
{0x14, 0x90170130},
{0x21, 0x02211040}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60180},
+ {0x14, 0x90170120},
+ {0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60160},
{0x14, 0x90170120},
{0x21, 0x02211030}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60170},
+ {0x14, 0x90170120},
+ {0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC256_STANDARD_PINS),
SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
case 0x10ec0294:
spec->codec_variant = ALC269_TYPE_ALC294;
break;
+ case 0x10ec0700:
+ case 0x10ec0701:
+ case 0x10ec0703:
+ spec->codec_variant = ALC269_TYPE_ALC700;
+ spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
+ alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
+ break;
}
if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662),
HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662),
HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680),
+ HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882),
HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
config SND_ATMEL_SOC_PDC
tristate
+ depends on HAS_DMA
default m if SND_ATMEL_SOC_SSC_PDC=m && SND_ATMEL_SOC_SSC=m
default y if SND_ATMEL_SOC_SSC_PDC=y || (SND_ATMEL_SOC_SSC_PDC=m && SND_ATMEL_SOC_SSC=y)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "no memory resource\n");
- return -ENXIO;
- }
-
io_base = devm_ioremap_resource(dev, res);
if (IS_ERR(io_base)) {
ret = PTR_ERR(io_base);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "no memory resource\n");
- return -ENXIO;
- }
-
io_base = devm_ioremap_resource(dev, res);
if (IS_ERR(io_base)) {
ret = PTR_ERR(io_base);
return ret;
}
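+	/* index by the SSC instance (pdev->id) rather than the DAI id */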
- dma_params = &ssc_dma_params[dai->id][dir];
+ dma_params = &ssc_dma_params[pdev->id][dir];
dma_params->ssc = ssc_p->ssc;
dma_params->substream = substream;
Say Y or M if you want to add support for codecs attached to
the BCM2835 I2S interface. You will also need
to select the audio interfaces to support below.
+
+config SND_SOC_CYGNUS
+ tristate "SoC platform audio for Broadcom Cygnus chips"
+ depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+ help
+ Say Y if you want to add support for ASoC audio on Broadcom
+	  Cygnus chips (bcm958300, bcm958305, bcm911360).
+
+	  If you don't know what to do here, say N.
obj-$(CONFIG_SND_BCM2835_SOC_I2S) += snd-soc-bcm2835-i2s.o
+# CYGNUS Platform Support
+snd-soc-cygnus-objs := cygnus-pcm.o cygnus-ssp.o
+
+obj-$(CONFIG_SND_SOC_CYGNUS) += snd-soc-cygnus.o
+
--- /dev/null
+/*
+ * Copyright (C) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "cygnus-ssp.h"
+
+/* Register offset needed for ASoC PCM module */
+
+#define INTH_R5F_STATUS_OFFSET 0x040
+#define INTH_R5F_CLEAR_OFFSET 0x048
+#define INTH_R5F_MASK_SET_OFFSET 0x050
+#define INTH_R5F_MASK_CLEAR_OFFSET 0x054
+
+#define BF_REARM_FREE_MARK_OFFSET 0x344
+#define BF_REARM_FULL_MARK_OFFSET 0x348
+
+/* Ring Buffer Ctrl Regs --- Start */
+/* AUD_FMM_BF_CTRL_SOURCECH_RINGBUF_X_RDADDR_REG_BASE */
+#define SRC_RBUF_0_RDADDR_OFFSET 0x500
+#define SRC_RBUF_1_RDADDR_OFFSET 0x518
+#define SRC_RBUF_2_RDADDR_OFFSET 0x530
+#define SRC_RBUF_3_RDADDR_OFFSET 0x548
+#define SRC_RBUF_4_RDADDR_OFFSET 0x560
+#define SRC_RBUF_5_RDADDR_OFFSET 0x578
+#define SRC_RBUF_6_RDADDR_OFFSET 0x590
+
+/* AUD_FMM_BF_CTRL_SOURCECH_RINGBUF_X_WRADDR_REG_BASE */
+#define SRC_RBUF_0_WRADDR_OFFSET 0x504
+#define SRC_RBUF_1_WRADDR_OFFSET 0x51c
+#define SRC_RBUF_2_WRADDR_OFFSET 0x534
+#define SRC_RBUF_3_WRADDR_OFFSET 0x54c
+#define SRC_RBUF_4_WRADDR_OFFSET 0x564
+#define SRC_RBUF_5_WRADDR_OFFSET 0x57c
+#define SRC_RBUF_6_WRADDR_OFFSET 0x594
+
+/* AUD_FMM_BF_CTRL_SOURCECH_RINGBUF_X_BASEADDR_REG_BASE */
+#define SRC_RBUF_0_BASEADDR_OFFSET 0x508
+#define SRC_RBUF_1_BASEADDR_OFFSET 0x520
+#define SRC_RBUF_2_BASEADDR_OFFSET 0x538
+#define SRC_RBUF_3_BASEADDR_OFFSET 0x550
+#define SRC_RBUF_4_BASEADDR_OFFSET 0x568
+#define SRC_RBUF_5_BASEADDR_OFFSET 0x580
+#define SRC_RBUF_6_BASEADDR_OFFSET 0x598
+
+/* AUD_FMM_BF_CTRL_SOURCECH_RINGBUF_X_ENDADDR_REG_BASE */
+#define SRC_RBUF_0_ENDADDR_OFFSET 0x50c
+#define SRC_RBUF_1_ENDADDR_OFFSET 0x524
+#define SRC_RBUF_2_ENDADDR_OFFSET 0x53c
+#define SRC_RBUF_3_ENDADDR_OFFSET 0x554
+#define SRC_RBUF_4_ENDADDR_OFFSET 0x56c
+#define SRC_RBUF_5_ENDADDR_OFFSET 0x584
+#define SRC_RBUF_6_ENDADDR_OFFSET 0x59c
+
+/* AUD_FMM_BF_CTRL_SOURCECH_RINGBUF_X_FREE_MARK_REG_BASE */
+#define SRC_RBUF_0_FREE_MARK_OFFSET 0x510
+#define SRC_RBUF_1_FREE_MARK_OFFSET 0x528
+#define SRC_RBUF_2_FREE_MARK_OFFSET 0x540
+#define SRC_RBUF_3_FREE_MARK_OFFSET 0x558
+#define SRC_RBUF_4_FREE_MARK_OFFSET 0x570
+#define SRC_RBUF_5_FREE_MARK_OFFSET 0x588
+#define SRC_RBUF_6_FREE_MARK_OFFSET 0x5a0
+
+/* AUD_FMM_BF_CTRL_DESTCH_RINGBUF_X_RDADDR_REG_BASE */
+#define DST_RBUF_0_RDADDR_OFFSET 0x5c0
+#define DST_RBUF_1_RDADDR_OFFSET 0x5d8
+#define DST_RBUF_2_RDADDR_OFFSET 0x5f0
+#define DST_RBUF_3_RDADDR_OFFSET 0x608
+#define DST_RBUF_4_RDADDR_OFFSET 0x620
+#define DST_RBUF_5_RDADDR_OFFSET 0x638
+
+/* AUD_FMM_BF_CTRL_DESTCH_RINGBUF_X_WRADDR_REG_BASE */
+#define DST_RBUF_0_WRADDR_OFFSET 0x5c4
+#define DST_RBUF_1_WRADDR_OFFSET 0x5dc
+#define DST_RBUF_2_WRADDR_OFFSET 0x5f4
+#define DST_RBUF_3_WRADDR_OFFSET 0x60c
+#define DST_RBUF_4_WRADDR_OFFSET 0x624
+#define DST_RBUF_5_WRADDR_OFFSET 0x63c
+
+/* AUD_FMM_BF_CTRL_DESTCH_RINGBUF_X_BASEADDR_REG_BASE */
+#define DST_RBUF_0_BASEADDR_OFFSET 0x5c8
+#define DST_RBUF_1_BASEADDR_OFFSET 0x5e0
+#define DST_RBUF_2_BASEADDR_OFFSET 0x5f8
+#define DST_RBUF_3_BASEADDR_OFFSET 0x610
+#define DST_RBUF_4_BASEADDR_OFFSET 0x628
+#define DST_RBUF_5_BASEADDR_OFFSET 0x640
+
+/* AUD_FMM_BF_CTRL_DESTCH_RINGBUF_X_ENDADDR_REG_BASE */
+#define DST_RBUF_0_ENDADDR_OFFSET 0x5cc
+#define DST_RBUF_1_ENDADDR_OFFSET 0x5e4
+#define DST_RBUF_2_ENDADDR_OFFSET 0x5fc
+#define DST_RBUF_3_ENDADDR_OFFSET 0x614
+#define DST_RBUF_4_ENDADDR_OFFSET 0x62c
+#define DST_RBUF_5_ENDADDR_OFFSET 0x644
+
+/* AUD_FMM_BF_CTRL_DESTCH_RINGBUF_X_FULL_MARK_REG_BASE */
+#define DST_RBUF_0_FULL_MARK_OFFSET 0x5d0
+#define DST_RBUF_1_FULL_MARK_OFFSET 0x5e8
+#define DST_RBUF_2_FULL_MARK_OFFSET 0x600
+#define DST_RBUF_3_FULL_MARK_OFFSET 0x618
+#define DST_RBUF_4_FULL_MARK_OFFSET 0x630
+#define DST_RBUF_5_FULL_MARK_OFFSET 0x648
+/* Ring Buffer Ctrl Regs --- End */
+
+/* Error Status Regs --- Start */
+/* AUD_FMM_BF_ESR_ESRX_STATUS_REG_BASE */
+#define ESR0_STATUS_OFFSET 0x900
+#define ESR1_STATUS_OFFSET 0x918
+#define ESR2_STATUS_OFFSET 0x930
+#define ESR3_STATUS_OFFSET 0x948
+#define ESR4_STATUS_OFFSET 0x960
+
+/* AUD_FMM_BF_ESR_ESRX_STATUS_CLEAR_REG_BASE */
+#define ESR0_STATUS_CLR_OFFSET 0x908
+#define ESR1_STATUS_CLR_OFFSET 0x920
+#define ESR2_STATUS_CLR_OFFSET 0x938
+#define ESR3_STATUS_CLR_OFFSET 0x950
+#define ESR4_STATUS_CLR_OFFSET 0x968
+
+/* AUD_FMM_BF_ESR_ESRX_MASK_REG_BASE */
+#define ESR0_MASK_STATUS_OFFSET 0x90c
+#define ESR1_MASK_STATUS_OFFSET 0x924
+#define ESR2_MASK_STATUS_OFFSET 0x93c
+#define ESR3_MASK_STATUS_OFFSET 0x954
+#define ESR4_MASK_STATUS_OFFSET 0x96c
+
+/* AUD_FMM_BF_ESR_ESRX_MASK_SET_REG_BASE */
+#define ESR0_MASK_SET_OFFSET 0x910
+#define ESR1_MASK_SET_OFFSET 0x928
+#define ESR2_MASK_SET_OFFSET 0x940
+#define ESR3_MASK_SET_OFFSET 0x958
+#define ESR4_MASK_SET_OFFSET 0x970
+
+/* AUD_FMM_BF_ESR_ESRX_MASK_CLEAR_REG_BASE */
+#define ESR0_MASK_CLR_OFFSET 0x914
+#define ESR1_MASK_CLR_OFFSET 0x92c
+#define ESR2_MASK_CLR_OFFSET 0x944
+#define ESR3_MASK_CLR_OFFSET 0x95c
+#define ESR4_MASK_CLR_OFFSET 0x974
+/* Error Status Regs --- End */
+
+#define R5F_ESR0_SHIFT 0 /* esr0 = fifo underflow */
+#define R5F_ESR1_SHIFT 1 /* esr1 = ringbuf underflow */
+#define R5F_ESR2_SHIFT 2 /* esr2 = ringbuf overflow */
+#define R5F_ESR3_SHIFT 3 /* esr3 = freemark */
+#define R5F_ESR4_SHIFT 4 /* esr4 = fullmark */
+
+
+/* Mask for R5F register. Set all relevant interrupt for playback handler */
+#define ANY_PLAYBACK_IRQ (BIT(R5F_ESR0_SHIFT) | \
+ BIT(R5F_ESR1_SHIFT) | \
+ BIT(R5F_ESR3_SHIFT))
+
+/* Mask for R5F register. Set all relevant interrupt for capture handler */
+#define ANY_CAPTURE_IRQ (BIT(R5F_ESR2_SHIFT) | BIT(R5F_ESR4_SHIFT))
+
+/*
+ * PERIOD_BYTES_MIN is the number of bytes at which the interrupt will tick.
+ * This number should be a multiple of 256. Minimum value is 256
+ */
+#define PERIOD_BYTES_MIN 0x100
+
+static const struct snd_pcm_hardware cygnus_pcm_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+
+ /* A period is basically an interrupt */
+ .period_bytes_min = PERIOD_BYTES_MIN,
+ .period_bytes_max = 0x10000,
+
+	/* periods_min/max gives the range of approx. interrupts per buffer */
+ .periods_min = 2,
+ .periods_max = 8,
+
+ /*
+	 * buffer_bytes_max caps the data allocated for each enabled channel;
+	 * it may be less than period_bytes_max * periods_max.
+ */
+ .buffer_bytes_max = 4 * 0x8000,
+};
+
+static u64 cygnus_dma_dmamask = DMA_BIT_MASK(32);
+
+static struct cygnus_aio_port *cygnus_dai_get_dma_data(
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+
+ return snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
+}
+
+static void ringbuf_set_initial(void __iomem *audio_io,
+ struct ringbuf_regs *p_rbuf,
+ bool is_playback,
+ u32 start,
+ u32 periodsize,
+ u32 bufsize)
+{
+ u32 initial_rd;
+ u32 initial_wr;
+ u32 end;
+ u32 fmark_val; /* free or full mark */
+
+ p_rbuf->period_bytes = periodsize;
+ p_rbuf->buf_size = bufsize;
+
+ if (is_playback) {
+ /* Set the pointers to indicate full (flip uppermost bit) */
+ initial_rd = start;
+ initial_wr = initial_rd ^ BIT(31);
+ } else {
+ /* Set the pointers to indicate empty */
+ initial_wr = start;
+ initial_rd = initial_wr;
+ }
+
+ end = start + bufsize - 1;
+
+ /*
+ * The interrupt will fire when free/full mark is *exceeded*
+ * The fmark value must be multiple of PERIOD_BYTES_MIN so set fmark
+ * to be PERIOD_BYTES_MIN less than the period size.
+ */
+ fmark_val = periodsize - PERIOD_BYTES_MIN;
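+	/* e.g. a 0x1000 byte period yields fmark_val = 0x1000 - 0x100 = 0xf00 */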
+
+ writel(start, audio_io + p_rbuf->baseaddr);
+ writel(end, audio_io + p_rbuf->endaddr);
+ writel(fmark_val, audio_io + p_rbuf->fmark);
+ writel(initial_rd, audio_io + p_rbuf->rdaddr);
+ writel(initial_wr, audio_io + p_rbuf->wraddr);
+}
+
+static int configure_ringbuf_regs(struct snd_pcm_substream *substream)
+{
+ struct cygnus_aio_port *aio;
+ struct ringbuf_regs *p_rbuf;
+ int status = 0;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ /* Map the ssp portnum to a set of ring buffers. */
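+	/* Each port claims every other ring-buffer set, i.e. index = 2 * portnum. */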
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ p_rbuf = &aio->play_rb_regs;
+
+ switch (aio->portnum) {
+ case 0:
+ *p_rbuf = RINGBUF_REG_PLAYBACK(0);
+ break;
+ case 1:
+ *p_rbuf = RINGBUF_REG_PLAYBACK(2);
+ break;
+ case 2:
+ *p_rbuf = RINGBUF_REG_PLAYBACK(4);
+ break;
+ case 3: /* SPDIF */
+ *p_rbuf = RINGBUF_REG_PLAYBACK(6);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ } else {
+ p_rbuf = &aio->capture_rb_regs;
+
+ switch (aio->portnum) {
+ case 0:
+ *p_rbuf = RINGBUF_REG_CAPTURE(0);
+ break;
+ case 1:
+ *p_rbuf = RINGBUF_REG_CAPTURE(2);
+ break;
+ case 2:
+ *p_rbuf = RINGBUF_REG_CAPTURE(4);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ }
+
+ return status;
+}
+
+static struct ringbuf_regs *get_ringbuf(struct snd_pcm_substream *substream)
+{
+ struct cygnus_aio_port *aio;
+ struct ringbuf_regs *p_rbuf = NULL;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ p_rbuf = &aio->play_rb_regs;
+ else
+ p_rbuf = &aio->capture_rb_regs;
+
+ return p_rbuf;
+}
+
+static void enable_intr(struct snd_pcm_substream *substream)
+{
+ struct cygnus_aio_port *aio;
+ u32 clear_mask;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ /* The port number maps to the bit position to be cleared */
+ clear_mask = BIT(aio->portnum);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* Clear interrupt status before enabling them */
+ writel(clear_mask, aio->cygaud->audio + ESR0_STATUS_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR1_STATUS_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR3_STATUS_CLR_OFFSET);
+ /* Unmask the interrupts of the given port*/
+ writel(clear_mask, aio->cygaud->audio + ESR0_MASK_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR1_MASK_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR3_MASK_CLR_OFFSET);
+
+ writel(ANY_PLAYBACK_IRQ,
+ aio->cygaud->audio + INTH_R5F_MASK_CLEAR_OFFSET);
+ } else {
+ writel(clear_mask, aio->cygaud->audio + ESR2_STATUS_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR4_STATUS_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR2_MASK_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR4_MASK_CLR_OFFSET);
+
+ writel(ANY_CAPTURE_IRQ,
+ aio->cygaud->audio + INTH_R5F_MASK_CLEAR_OFFSET);
+ }
+}
+
+static void disable_intr(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct cygnus_aio_port *aio;
+ u32 set_mask;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ dev_dbg(rtd->cpu_dai->dev, "%s on port %d\n", __func__, aio->portnum);
+
+ /* The port number maps to the bit position to be set */
+ set_mask = BIT(aio->portnum);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* Mask the interrupts of the given port*/
+ writel(set_mask, aio->cygaud->audio + ESR0_MASK_SET_OFFSET);
+ writel(set_mask, aio->cygaud->audio + ESR1_MASK_SET_OFFSET);
+ writel(set_mask, aio->cygaud->audio + ESR3_MASK_SET_OFFSET);
+ } else {
+ writel(set_mask, aio->cygaud->audio + ESR2_MASK_SET_OFFSET);
+ writel(set_mask, aio->cygaud->audio + ESR4_MASK_SET_OFFSET);
+ }
+}
+
+static int cygnus_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ enable_intr(substream);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ disable_intr(substream);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void cygnus_pcm_period_elapsed(struct snd_pcm_substream *substream)
+{
+ struct cygnus_aio_port *aio;
+ struct ringbuf_regs *p_rbuf = NULL;
+ u32 regval;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ p_rbuf = get_ringbuf(substream);
+
+ /*
+ * If free/full mark interrupt occurs, provide timestamp
+ * to ALSA and update appropriate idx by period_bytes
+ */
+ snd_pcm_period_elapsed(substream);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* Set the ring buffer to full */
+ regval = readl(aio->cygaud->audio + p_rbuf->rdaddr);
+ regval = regval ^ BIT(31);
+ writel(regval, aio->cygaud->audio + p_rbuf->wraddr);
+ } else {
+ /* Set the ring buffer to empty */
+ regval = readl(aio->cygaud->audio + p_rbuf->wraddr);
+ writel(regval, aio->cygaud->audio + p_rbuf->rdaddr);
+ }
+}
+
+/*
+ * ESR0/1/3 status Description
+ * 0x1 I2S0_out port caused interrupt
+ * 0x2 I2S1_out port caused interrupt
+ * 0x4 I2S2_out port caused interrupt
+ * 0x8 SPDIF_out port caused interrupt
+ */
+static void handle_playback_irq(struct cygnus_audio *cygaud)
+{
+ void __iomem *audio_io;
+ u32 port;
+ u32 esr_status0, esr_status1, esr_status3;
+
+ audio_io = cygaud->audio;
+
+ /*
+	 * ESR status gets updated whether or not interrupts are enabled.
+ * So, check the ESR mask, which provides interrupt enable/
+ * disable status and use it to determine which ESR status
+ * should be serviced.
+ */
+ esr_status0 = readl(audio_io + ESR0_STATUS_OFFSET);
+ esr_status0 &= ~readl(audio_io + ESR0_MASK_STATUS_OFFSET);
+ esr_status1 = readl(audio_io + ESR1_STATUS_OFFSET);
+ esr_status1 &= ~readl(audio_io + ESR1_MASK_STATUS_OFFSET);
+ esr_status3 = readl(audio_io + ESR3_STATUS_OFFSET);
+ esr_status3 &= ~readl(audio_io + ESR3_MASK_STATUS_OFFSET);
+
+ for (port = 0; port < CYGNUS_MAX_PLAYBACK_PORTS; port++) {
+ u32 esrmask = BIT(port);
+
+ /*
+ * Ringbuffer or FIFO underflow
+ * If we get this interrupt then, it is also true that we have
+ * not yet responded to the freemark interrupt.
+ * Log a debug message. The freemark handler below will
+ * handle getting everything going again.
+ */
+ if ((esrmask & esr_status1) || (esrmask & esr_status0)) {
+ dev_dbg(cygaud->dev,
+ "Underrun: esr0=0x%x, esr1=0x%x esr3=0x%x\n",
+ esr_status0, esr_status1, esr_status3);
+ }
+
+ /*
+ * Freemark is hit. This is the normal interrupt.
+ * In typical operation the read and write regs will be equal
+ */
+ if (esrmask & esr_status3) {
+ struct snd_pcm_substream *playstr;
+
+ playstr = cygaud->portinfo[port].play_stream;
+ cygnus_pcm_period_elapsed(playstr);
+ }
+ }
+
+ /* Clear ESR interrupt */
+ writel(esr_status0, audio_io + ESR0_STATUS_CLR_OFFSET);
+ writel(esr_status1, audio_io + ESR1_STATUS_CLR_OFFSET);
+ writel(esr_status3, audio_io + ESR3_STATUS_CLR_OFFSET);
+ /* Rearm freemark logic by writing 1 to the correct bit */
+ writel(esr_status3, audio_io + BF_REARM_FREE_MARK_OFFSET);
+}
+
+/*
+ * ESR2/4 status Description
+ * 0x1 I2S0_in port caused interrupt
+ * 0x2 I2S1_in port caused interrupt
+ * 0x4 I2S2_in port caused interrupt
+ */
+static void handle_capture_irq(struct cygnus_audio *cygaud)
+{
+ void __iomem *audio_io;
+ u32 port;
+ u32 esr_status2, esr_status4;
+
+ audio_io = cygaud->audio;
+
+ /*
+	 * ESR status gets updated whether or not interrupts are enabled.
+ * So, check the ESR mask, which provides interrupt enable/
+ * disable status and use it to determine which ESR status
+ * should be serviced.
+ */
+ esr_status2 = readl(audio_io + ESR2_STATUS_OFFSET);
+ esr_status2 &= ~readl(audio_io + ESR2_MASK_STATUS_OFFSET);
+ esr_status4 = readl(audio_io + ESR4_STATUS_OFFSET);
+ esr_status4 &= ~readl(audio_io + ESR4_MASK_STATUS_OFFSET);
+
+ for (port = 0; port < CYGNUS_MAX_CAPTURE_PORTS; port++) {
+ u32 esrmask = BIT(port);
+
+ /*
+ * Ringbuffer or FIFO overflow
+ * If we get this interrupt then, it is also true that we have
+ * not yet responded to the fullmark interrupt.
+ * Log a debug message. The fullmark handler below will
+ * handle getting everything going again.
+ */
+ if (esrmask & esr_status2)
+ dev_dbg(cygaud->dev,
+ "Overflow: esr2=0x%x\n", esr_status2);
+
+ if (esrmask & esr_status4) {
+ struct snd_pcm_substream *capstr;
+
+ capstr = cygaud->portinfo[port].capture_stream;
+ cygnus_pcm_period_elapsed(capstr);
+ }
+ }
+
+ writel(esr_status2, audio_io + ESR2_STATUS_CLR_OFFSET);
+ writel(esr_status4, audio_io + ESR4_STATUS_CLR_OFFSET);
+ /* Rearm fullmark logic by writing 1 to the correct bit */
+ writel(esr_status4, audio_io + BF_REARM_FULL_MARK_OFFSET);
+}
+
+static irqreturn_t cygnus_dma_irq(int irq, void *data)
+{
+ u32 r5_status;
+ struct cygnus_audio *cygaud = data;
+
+ /*
+ * R5 status bits Description
+ * 0 ESR0 (playback FIFO interrupt)
+ * 1 ESR1 (playback rbuf interrupt)
+ * 2 ESR2 (capture rbuf interrupt)
+ * 3 ESR3 (Freemark play. interrupt)
+ * 4 ESR4 (Fullmark capt. interrupt)
+ */
+ r5_status = readl(cygaud->audio + INTH_R5F_STATUS_OFFSET);
+
+ if (!(r5_status & (ANY_PLAYBACK_IRQ | ANY_CAPTURE_IRQ)))
+ return IRQ_NONE;
+
+ /* If playback interrupt happened */
+ if (ANY_PLAYBACK_IRQ & r5_status) {
+ handle_playback_irq(cygaud);
+ writel(ANY_PLAYBACK_IRQ & r5_status,
+ cygaud->audio + INTH_R5F_CLEAR_OFFSET);
+ }
+
+ /* If capture interrupt happened */
+ if (ANY_CAPTURE_IRQ & r5_status) {
+ handle_capture_irq(cygaud);
+ writel(ANY_CAPTURE_IRQ & r5_status,
+ cygaud->audio + INTH_R5F_CLEAR_OFFSET);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int cygnus_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct cygnus_aio_port *aio;
+ int ret;
+
+ aio = cygnus_dai_get_dma_data(substream);
+ if (!aio)
+ return -ENODEV;
+
+ dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+
+ snd_soc_set_runtime_hwparams(substream, &cygnus_pcm_hw);
+
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, PERIOD_BYTES_MIN);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, PERIOD_BYTES_MIN);
+ if (ret < 0)
+ return ret;
+ /*
+ * Keep track of which substream belongs to which port.
+	 * This info is needed by snd_pcm_period_elapsed() in the IRQ handler.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ aio->play_stream = substream;
+ else
+ aio->capture_stream = substream;
+
+ return 0;
+}
+
+static int cygnus_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct cygnus_aio_port *aio;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ aio->play_stream = NULL;
+ else
+ aio->capture_stream = NULL;
+
+ if (!aio->play_stream && !aio->capture_stream)
+ dev_dbg(rtd->cpu_dai->dev, "freed port %d\n", aio->portnum);
+
+ return 0;
+}
+
+static int cygnus_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct cygnus_aio_port *aio;
+ int ret = 0;
+
+ aio = cygnus_dai_get_dma_data(substream);
+ dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+ runtime->dma_bytes = params_buffer_bytes(params);
+
+ return ret;
+}
+
+static int cygnus_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct cygnus_aio_port *aio;
+
+ aio = cygnus_dai_get_dma_data(substream);
+ dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+
+ snd_pcm_set_runtime_buffer(substream, NULL);
+ return 0;
+}
+
+static int cygnus_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct cygnus_aio_port *aio;
+ unsigned long bufsize, periodsize;
+ int ret = 0;
+ bool is_play;
+ u32 start;
+ struct ringbuf_regs *p_rbuf = NULL;
+
+ aio = cygnus_dai_get_dma_data(substream);
+ dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+
+ bufsize = snd_pcm_lib_buffer_bytes(substream);
+ periodsize = snd_pcm_lib_period_bytes(substream);
+
+ dev_dbg(rtd->cpu_dai->dev, "%s (buf_size %lu) (period_size %lu)\n",
+ __func__, bufsize, periodsize);
+
+ configure_ringbuf_regs(substream);
+
+ p_rbuf = get_ringbuf(substream);
+
+ start = runtime->dma_addr;
+
+	is_play = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+
+ ringbuf_set_initial(aio->cygaud->audio, p_rbuf, is_play, start,
+ periodsize, bufsize);
+
+ return ret;
+}
+
+static snd_pcm_uframes_t cygnus_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct cygnus_aio_port *aio;
+ unsigned int res = 0, cur = 0, base = 0;
+ struct ringbuf_regs *p_rbuf = NULL;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ /*
+	 * Get the offset of the current read (for playback) or write
+ * index (for capture). Report this value back to the asoc framework.
+ */
+ p_rbuf = get_ringbuf(substream);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ cur = readl(aio->cygaud->audio + p_rbuf->rdaddr);
+ else
+ cur = readl(aio->cygaud->audio + p_rbuf->wraddr);
+
+ base = readl(aio->cygaud->audio + p_rbuf->baseaddr);
+
+ /*
+ * Mask off the MSB of the rdaddr,wraddr and baseaddr
+ * since MSB is not part of the address
+ */
+ res = (cur & 0x7fffffff) - (base & 0x7fffffff);
+
+ return bytes_to_frames(substream->runtime, res);
+}
+
+static int cygnus_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
+{
+ struct snd_pcm_substream *substream = pcm->streams[stream].substream;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+ size_t size;
+
+ size = cygnus_pcm_hw.buffer_bytes_max;
+
+ buf->dev.type = SNDRV_DMA_TYPE_DEV;
+ buf->dev.dev = pcm->card->dev;
+ buf->private_data = NULL;
+ buf->area = dma_alloc_coherent(pcm->card->dev, size,
+ &buf->addr, GFP_KERNEL);
+
+ dev_dbg(rtd->cpu_dai->dev, "%s: size 0x%zx @ %pK\n",
+ __func__, size, buf->area);
+
+ if (!buf->area) {
+ dev_err(rtd->cpu_dai->dev, "%s: dma_alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ buf->bytes = size;
+
+ return 0;
+}
+
+static const struct snd_pcm_ops cygnus_pcm_ops = {
+ .open = cygnus_pcm_open,
+ .close = cygnus_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = cygnus_pcm_hw_params,
+ .hw_free = cygnus_pcm_hw_free,
+ .prepare = cygnus_pcm_prepare,
+ .trigger = cygnus_pcm_trigger,
+ .pointer = cygnus_pcm_pointer,
+};
+
+static void cygnus_dma_free_dma_buffers(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_dma_buffer *buf;
+
+ substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (substream) {
+ buf = &substream->dma_buffer;
+ if (buf->area) {
+ dma_free_coherent(pcm->card->dev, buf->bytes,
+ buf->area, buf->addr);
+ buf->area = NULL;
+ }
+ }
+
+ substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ if (substream) {
+ buf = &substream->dma_buffer;
+ if (buf->area) {
+ dma_free_coherent(pcm->card->dev, buf->bytes,
+ buf->area, buf->addr);
+ buf->area = NULL;
+ }
+ }
+}
+
+static int cygnus_dma_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_pcm *pcm = rtd->pcm;
+ int ret;
+
+ if (!card->dev->dma_mask)
+ card->dev->dma_mask = &cygnus_dma_dmamask;
+ if (!card->dev->coherent_dma_mask)
+ card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+ ret = cygnus_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ if (ret)
+ return ret;
+ }
+
+ if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+ ret = cygnus_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_CAPTURE);
+ if (ret) {
+ cygnus_dma_free_dma_buffers(pcm);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct snd_soc_platform_driver cygnus_soc_platform = {
+ .ops = &cygnus_pcm_ops,
+ .pcm_new = cygnus_dma_new,
+ .pcm_free = cygnus_dma_free_dma_buffers,
+};
+
+int cygnus_soc_platform_register(struct device *dev,
+ struct cygnus_audio *cygaud)
+{
+ int rc = 0;
+
+ dev_dbg(dev, "%s Enter\n", __func__);
+
+ rc = devm_request_irq(dev, cygaud->irq_num, cygnus_dma_irq,
+ IRQF_SHARED, "cygnus-audio", cygaud);
+ if (rc) {
+ dev_err(dev, "%s request_irq error %d\n", __func__, rc);
+ return rc;
+ }
+
+ rc = snd_soc_register_platform(dev, &cygnus_soc_platform);
+ if (rc) {
+ dev_err(dev, "%s failed\n", __func__);
+ return rc;
+ }
+
+ return 0;
+}
+
+int cygnus_soc_platform_unregister(struct device *dev)
+{
+ snd_soc_unregister_platform(dev);
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Cygnus ASoC PCM module");
--- /dev/null
+/*
+ * Copyright (C) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "cygnus-ssp.h"
+
+#define DEFAULT_VCO 1354750204
+
+#define CYGNUS_TDM_RATE \
+ (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_22050 | \
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000)
+
+#define CAPTURE_FCI_ID_BASE 0x180
+#define CYGNUS_SSP_TRISTATE_MASK 0x001fff
+#define CYGNUS_PLLCLKSEL_MASK 0xf
+
+/* Used with stream_on field to indicate which streams are active */
+#define PLAYBACK_STREAM_MASK BIT(0)
+#define CAPTURE_STREAM_MASK BIT(1)
+
+#define I2S_STREAM_CFG_MASK 0xff003ff
+#define I2S_CAP_STREAM_CFG_MASK 0xf0
+#define SPDIF_STREAM_CFG_MASK 0x3ff
+#define CH_GRP_STEREO 0x1
+
+/* Begin register offset defines */
+#define AUD_MISC_SEROUT_OE_REG_BASE 0x01c
+#define AUD_MISC_SEROUT_SPDIF_OE 12
+#define AUD_MISC_SEROUT_MCLK_OE 3
+#define AUD_MISC_SEROUT_LRCK_OE 2
+#define AUD_MISC_SEROUT_SCLK_OE 1
+#define AUD_MISC_SEROUT_SDAT_OE 0
+
+/* AUD_FMM_BF_CTRL_xxx regs */
+#define BF_DST_CFG0_OFFSET 0x100
+#define BF_DST_CFG1_OFFSET 0x104
+#define BF_DST_CFG2_OFFSET 0x108
+
+#define BF_DST_CTRL0_OFFSET 0x130
+#define BF_DST_CTRL1_OFFSET 0x134
+#define BF_DST_CTRL2_OFFSET 0x138
+
+#define BF_SRC_CFG0_OFFSET 0x148
+#define BF_SRC_CFG1_OFFSET 0x14c
+#define BF_SRC_CFG2_OFFSET 0x150
+#define BF_SRC_CFG3_OFFSET 0x154
+
+#define BF_SRC_CTRL0_OFFSET 0x1c0
+#define BF_SRC_CTRL1_OFFSET 0x1c4
+#define BF_SRC_CTRL2_OFFSET 0x1c8
+#define BF_SRC_CTRL3_OFFSET 0x1cc
+
+#define BF_SRC_GRP0_OFFSET 0x1fc
+#define BF_SRC_GRP1_OFFSET 0x200
+#define BF_SRC_GRP2_OFFSET 0x204
+#define BF_SRC_GRP3_OFFSET 0x208
+
+#define BF_SRC_GRP_EN_OFFSET 0x320
+#define BF_SRC_GRP_FLOWON_OFFSET 0x324
+#define BF_SRC_GRP_SYNC_DIS_OFFSET 0x328
+
+/* AUD_FMM_IOP_OUT_I2S_xxx regs */
+#define OUT_I2S_0_STREAM_CFG_OFFSET 0xa00
+#define OUT_I2S_0_CFG_OFFSET 0xa04
+#define OUT_I2S_0_MCLK_CFG_OFFSET 0xa0c
+
+#define OUT_I2S_1_STREAM_CFG_OFFSET 0xa40
+#define OUT_I2S_1_CFG_OFFSET 0xa44
+#define OUT_I2S_1_MCLK_CFG_OFFSET 0xa4c
+
+#define OUT_I2S_2_STREAM_CFG_OFFSET 0xa80
+#define OUT_I2S_2_CFG_OFFSET 0xa84
+#define OUT_I2S_2_MCLK_CFG_OFFSET 0xa8c
+
+/* AUD_FMM_IOP_OUT_SPDIF_xxx regs */
+#define SPDIF_STREAM_CFG_OFFSET 0xac0
+#define SPDIF_CTRL_OFFSET 0xac4
+#define SPDIF_FORMAT_CFG_OFFSET 0xad8
+#define SPDIF_MCLK_CFG_OFFSET 0xadc
+
+/* AUD_FMM_IOP_PLL_0_xxx regs */
+#define IOP_PLL_0_MACRO_OFFSET 0xb00
+#define IOP_PLL_0_MDIV_Ch0_OFFSET 0xb14
+#define IOP_PLL_0_MDIV_Ch1_OFFSET 0xb18
+#define IOP_PLL_0_MDIV_Ch2_OFFSET 0xb1c
+
+#define IOP_PLL_0_ACTIVE_MDIV_Ch0_OFFSET 0xb30
+#define IOP_PLL_0_ACTIVE_MDIV_Ch1_OFFSET 0xb34
+#define IOP_PLL_0_ACTIVE_MDIV_Ch2_OFFSET 0xb38
+
+/* AUD_FMM_IOP_xxx regs */
+#define IOP_PLL_0_CONTROL_OFFSET 0xb04
+#define IOP_PLL_0_USER_NDIV_OFFSET 0xb08
+#define IOP_PLL_0_ACTIVE_NDIV_OFFSET 0xb20
+#define IOP_PLL_0_RESET_OFFSET 0xb5c
+
+/* AUD_FMM_IOP_IN_I2S_xxx regs */
+#define IN_I2S_0_STREAM_CFG_OFFSET 0x00
+#define IN_I2S_0_CFG_OFFSET 0x04
+#define IN_I2S_1_STREAM_CFG_OFFSET 0x40
+#define IN_I2S_1_CFG_OFFSET 0x44
+#define IN_I2S_2_STREAM_CFG_OFFSET 0x80
+#define IN_I2S_2_CFG_OFFSET 0x84
+
+/* AUD_FMM_IOP_MISC_xxx regs */
+#define IOP_SW_INIT_LOGIC 0x1c0
+
+/* End register offset defines */
+
+
+/* AUD_FMM_IOP_OUT_I2S_x_MCLK_CFG_0_REG */
+#define I2S_OUT_MCLKRATE_SHIFT 16
+
+/* AUD_FMM_IOP_OUT_I2S_x_MCLK_CFG_REG */
+#define I2S_OUT_PLLCLKSEL_SHIFT 0
+
+/* AUD_FMM_IOP_OUT_I2S_x_STREAM_CFG */
+#define I2S_OUT_STREAM_ENA 31
+#define I2S_OUT_STREAM_CFG_GROUP_ID 20
+#define I2S_OUT_STREAM_CFG_CHANNEL_GROUPING 24
+
+/* AUD_FMM_IOP_IN_I2S_x_CAP */
+#define I2S_IN_STREAM_CFG_CAP_ENA 31
+#define I2S_IN_STREAM_CFG_0_GROUP_ID 4
+
+/* AUD_FMM_IOP_OUT_I2S_x_I2S_CFG_REG */
+#define I2S_OUT_CFGX_CLK_ENA 0
+#define I2S_OUT_CFGX_DATA_ENABLE 1
+#define I2S_OUT_CFGX_DATA_ALIGNMENT 6
+#define I2S_OUT_CFGX_BITS_PER_SLOT 13
+#define I2S_OUT_CFGX_VALID_SLOT 14
+#define I2S_OUT_CFGX_FSYNC_WIDTH 18
+#define I2S_OUT_CFGX_SCLKS_PER_1FS_DIV32 26
+#define I2S_OUT_CFGX_SLAVE_MODE 30
+#define I2S_OUT_CFGX_TDM_MODE 31
+
+/* AUD_FMM_BF_CTRL_SOURCECH_CFGx_REG */
+#define BF_SRC_CFGX_SFIFO_ENA 0
+#define BF_SRC_CFGX_BUFFER_PAIR_ENABLE 1
+#define BF_SRC_CFGX_SAMPLE_CH_MODE 2
+#define BF_SRC_CFGX_SFIFO_SZ_DOUBLE 5
+#define BF_SRC_CFGX_NOT_PAUSE_WHEN_EMPTY 10
+#define BF_SRC_CFGX_BIT_RES 20
+#define BF_SRC_CFGX_PROCESS_SEQ_ID_VALID 31
+
+/* AUD_FMM_BF_CTRL_DESTCH_CFGx_REG */
+#define BF_DST_CFGX_CAP_ENA 0
+#define BF_DST_CFGX_BUFFER_PAIR_ENABLE 1
+#define BF_DST_CFGX_DFIFO_SZ_DOUBLE 2
+#define BF_DST_CFGX_NOT_PAUSE_WHEN_FULL 11
+#define BF_DST_CFGX_FCI_ID 12
+#define BF_DST_CFGX_CAP_MODE 24
+#define BF_DST_CFGX_PROC_SEQ_ID_VALID 31
+
+/* AUD_FMM_IOP_OUT_SPDIF_xxx */
+#define SPDIF_0_OUT_DITHER_ENA 3
+#define SPDIF_0_OUT_STREAM_ENA 31
+
+/* AUD_FMM_IOP_PLL_0_USER */
+#define IOP_PLL_0_USER_NDIV_FRAC 10
+
+/* AUD_FMM_IOP_PLL_0_ACTIVE */
+#define IOP_PLL_0_ACTIVE_NDIV_FRAC 10
+
+
+#define INIT_SSP_REGS(num) (struct cygnus_ssp_regs){ \
+ .i2s_stream_cfg = OUT_I2S_ ##num## _STREAM_CFG_OFFSET, \
+ .i2s_cap_stream_cfg = IN_I2S_ ##num## _STREAM_CFG_OFFSET, \
+ .i2s_cfg = OUT_I2S_ ##num## _CFG_OFFSET, \
+ .i2s_cap_cfg = IN_I2S_ ##num## _CFG_OFFSET, \
+ .i2s_mclk_cfg = OUT_I2S_ ##num## _MCLK_CFG_OFFSET, \
+ .bf_destch_ctrl = BF_DST_CTRL ##num## _OFFSET, \
+ .bf_destch_cfg = BF_DST_CFG ##num## _OFFSET, \
+ .bf_sourcech_ctrl = BF_SRC_CTRL ##num## _OFFSET, \
+ .bf_sourcech_cfg = BF_SRC_CFG ##num## _OFFSET, \
+ .bf_sourcech_grp = BF_SRC_GRP ##num## _OFFSET \
+}
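+
+/*
+ * e.g. aio->regs = INIT_SSP_REGS(1); the argument must be a literal digit
+ * since it is pasted directly into the register offset names.
+ */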
+
+struct pll_macro_entry {
+ u32 mclk;
+ u32 pll_ch_num;
+};
+
+/*
+ * PLL has 3 output channels (1x, 2x, and 4x). Below are
+ * the common MCLK frequencies used by the audio driver.
+ */
+static const struct pll_macro_entry pll_predef_mclk[] = {
+ { 4096000, 0},
+ { 8192000, 1},
+ {16384000, 2},
+
+ { 5644800, 0},
+ {11289600, 1},
+ {22579200, 2},
+
+ { 6144000, 0},
+ {12288000, 1},
+ {24576000, 2},
+
+ {12288000, 0},
+ {24576000, 1},
+ {49152000, 2},
+
+ {22579200, 0},
+ {45158400, 1},
+ {90316800, 2},
+
+ {24576000, 0},
+ {49152000, 1},
+ {98304000, 2},
+};
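+
+/*
+ * Some MCLK frequencies appear under more than one channel above; the
+ * lookup in pll_configure_mclk() below simply takes the first match.
+ */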
+
+/* List of valid frame sizes for tdm mode */
+static const int ssp_valid_tdm_framesize[] = {32, 64, 128, 256, 512};
+
+/*
+ * Use this relationship to derive the sampling rate (lrclk):
+ * lrclk = mclk / ((2 * mclk_to_sclk_ratio) * (32 * SCLK))
+ *
+ * Use mclk and pll_ch from the table above.
+ *
+ * Valid SCLK = 0/1/2/4/8/12
+ *
+ * mclk_to_sclk_ratio = number of MCLKs per SCLK. The division is twice the
+ * value programmed in this field.
+ * Valid mclk_to_sclk_ratio = 1 through 15
+ *
+ * e.g. to set lrclk = 48 kHz, set mclk = 12288000, mclk_to_sclk_ratio = 2,
+ * SCLK = 64
+ */
+struct _ssp_clk_coeff {
+ u32 mclk;
+ u32 sclk_rate;
+ u32 rate;
+ u32 mclk_rate;
+};
+
+static const struct _ssp_clk_coeff ssp_clk_coeff[] = {
+ { 4096000, 32, 16000, 4},
+ { 4096000, 32, 32000, 2},
+ { 4096000, 64, 8000, 4},
+ { 4096000, 64, 16000, 2},
+ { 4096000, 64, 32000, 1},
+ { 4096000, 128, 8000, 2},
+ { 4096000, 128, 16000, 1},
+ { 4096000, 256, 8000, 1},
+
+ { 6144000, 32, 16000, 6},
+ { 6144000, 32, 32000, 3},
+ { 6144000, 32, 48000, 2},
+ { 6144000, 32, 96000, 1},
+ { 6144000, 64, 8000, 6},
+ { 6144000, 64, 16000, 3},
+ { 6144000, 64, 48000, 1},
+ { 6144000, 128, 8000, 3},
+
+ { 8192000, 32, 32000, 4},
+ { 8192000, 64, 16000, 4},
+ { 8192000, 64, 32000, 2},
+ { 8192000, 128, 8000, 4},
+ { 8192000, 128, 16000, 2},
+ { 8192000, 128, 32000, 1},
+ { 8192000, 256, 8000, 2},
+ { 8192000, 256, 16000, 1},
+ { 8192000, 512, 8000, 1},
+
+ {12288000, 32, 32000, 6},
+ {12288000, 32, 48000, 4},
+ {12288000, 32, 96000, 2},
+ {12288000, 32, 192000, 1},
+ {12288000, 64, 16000, 6},
+ {12288000, 64, 32000, 3},
+ {12288000, 64, 48000, 2},
+ {12288000, 64, 96000, 1},
+ {12288000, 128, 8000, 6},
+ {12288000, 128, 16000, 3},
+ {12288000, 128, 48000, 1},
+ {12288000, 256, 8000, 3},
+
+ {16384000, 64, 32000, 4},
+ {16384000, 128, 16000, 4},
+ {16384000, 128, 32000, 2},
+ {16384000, 256, 8000, 4},
+ {16384000, 256, 16000, 2},
+ {16384000, 256, 32000, 1},
+ {16384000, 512, 8000, 2},
+ {16384000, 512, 16000, 1},
+
+ {24576000, 32, 96000, 4},
+ {24576000, 32, 192000, 2},
+ {24576000, 64, 32000, 6},
+ {24576000, 64, 48000, 4},
+ {24576000, 64, 96000, 2},
+ {24576000, 64, 192000, 1},
+ {24576000, 128, 16000, 6},
+ {24576000, 128, 32000, 3},
+ {24576000, 128, 48000, 2},
+ {24576000, 256, 8000, 6},
+ {24576000, 256, 16000, 3},
+ {24576000, 256, 48000, 1},
+ {24576000, 512, 8000, 3},
+
+ {49152000, 32, 192000, 4},
+ {49152000, 64, 96000, 4},
+ {49152000, 64, 192000, 2},
+ {49152000, 128, 32000, 6},
+ {49152000, 128, 48000, 4},
+ {49152000, 128, 96000, 2},
+ {49152000, 128, 192000, 1},
+ {49152000, 256, 16000, 6},
+ {49152000, 256, 32000, 3},
+ {49152000, 256, 48000, 2},
+ {49152000, 256, 96000, 1},
+ {49152000, 512, 8000, 6},
+ {49152000, 512, 16000, 3},
+ {49152000, 512, 48000, 1},
+
+ { 5644800, 32, 22050, 4},
+ { 5644800, 32, 44100, 2},
+ { 5644800, 32, 88200, 1},
+ { 5644800, 64, 11025, 4},
+ { 5644800, 64, 22050, 2},
+ { 5644800, 64, 44100, 1},
+
+ {11289600, 32, 44100, 4},
+ {11289600, 32, 88200, 2},
+ {11289600, 32, 176400, 1},
+ {11289600, 64, 22050, 4},
+ {11289600, 64, 44100, 2},
+ {11289600, 64, 88200, 1},
+ {11289600, 128, 11025, 4},
+ {11289600, 128, 22050, 2},
+ {11289600, 128, 44100, 1},
+
+ {22579200, 32, 88200, 4},
+ {22579200, 32, 176400, 2},
+ {22579200, 64, 44100, 4},
+ {22579200, 64, 88200, 2},
+ {22579200, 64, 176400, 1},
+ {22579200, 128, 22050, 4},
+ {22579200, 128, 44100, 2},
+ {22579200, 128, 88200, 1},
+ {22579200, 256, 11025, 4},
+ {22579200, 256, 22050, 2},
+ {22579200, 256, 44100, 1},
+
+ {45158400, 32, 176400, 4},
+ {45158400, 64, 88200, 4},
+ {45158400, 64, 176400, 2},
+ {45158400, 128, 44100, 4},
+ {45158400, 128, 88200, 2},
+ {45158400, 128, 176400, 1},
+ {45158400, 256, 22050, 4},
+ {45158400, 256, 44100, 2},
+ {45158400, 256, 88200, 1},
+ {45158400, 512, 11025, 4},
+ {45158400, 512, 22050, 2},
+ {45158400, 512, 44100, 1},
+};
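+
+/*
+ * Cross-check of one row against the formula above:
+ * {12288000, 64, 48000, 2}: 12288000 / (2 * 2 * 64) = 48000, as expected.
+ */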
+
+static struct cygnus_aio_port *cygnus_dai_get_portinfo(struct snd_soc_dai *dai)
+{
+ struct cygnus_audio *cygaud = snd_soc_dai_get_drvdata(dai);
+
+ return &cygaud->portinfo[dai->id];
+}
+
+static int audio_ssp_init_portregs(struct cygnus_aio_port *aio)
+{
+ u32 value, fci_id;
+ int status = 0;
+
+ switch (aio->port_type) {
+ case PORT_TDM:
+ value = readl(aio->cygaud->audio + aio->regs.i2s_stream_cfg);
+ value &= ~I2S_STREAM_CFG_MASK;
+
+ /* Set Group ID */
+ writel(aio->portnum,
+ aio->cygaud->audio + aio->regs.bf_sourcech_grp);
+
+ /* Configure the AUD_FMM_IOP_OUT_I2S_x_STREAM_CFG reg */
+ value |= aio->portnum << I2S_OUT_STREAM_CFG_GROUP_ID;
+ value |= aio->portnum; /* FCI ID is the port num */
+ value |= CH_GRP_STEREO << I2S_OUT_STREAM_CFG_CHANNEL_GROUPING;
+ writel(value, aio->cygaud->audio + aio->regs.i2s_stream_cfg);
+
+ /* Configure the AUD_FMM_BF_CTRL_SOURCECH_CFGX reg */
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value &= ~BIT(BF_SRC_CFGX_NOT_PAUSE_WHEN_EMPTY);
+ value |= BIT(BF_SRC_CFGX_SFIFO_SZ_DOUBLE);
+ value |= BIT(BF_SRC_CFGX_PROCESS_SEQ_ID_VALID);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+
+ /* Configure the AUD_FMM_IOP_IN_I2S_x_CAP_STREAM_CFG_0 reg */
+ value = readl(aio->cygaud->i2s_in +
+ aio->regs.i2s_cap_stream_cfg);
+ value &= ~I2S_CAP_STREAM_CFG_MASK;
+ value |= aio->portnum << I2S_IN_STREAM_CFG_0_GROUP_ID;
+ writel(value, aio->cygaud->i2s_in +
+ aio->regs.i2s_cap_stream_cfg);
+
+ /* Configure the AUD_FMM_BF_CTRL_DESTCH_CFGX_REG_BASE reg */
+ fci_id = CAPTURE_FCI_ID_BASE + aio->portnum;
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_destch_cfg);
+ value |= BIT(BF_DST_CFGX_DFIFO_SZ_DOUBLE);
+ value &= ~BIT(BF_DST_CFGX_NOT_PAUSE_WHEN_FULL);
+ value |= (fci_id << BF_DST_CFGX_FCI_ID);
+ value |= BIT(BF_DST_CFGX_PROC_SEQ_ID_VALID);
+ writel(value, aio->cygaud->audio + aio->regs.bf_destch_cfg);
+
+ /* Enable the transmit pin for this port */
+ value = readl(aio->cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+ value &= ~BIT((aio->portnum * 4) + AUD_MISC_SEROUT_SDAT_OE);
+ writel(value, aio->cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+ break;
+ case PORT_SPDIF:
+ writel(aio->portnum, aio->cygaud->audio + BF_SRC_GRP3_OFFSET);
+
+ value = readl(aio->cygaud->audio + SPDIF_CTRL_OFFSET);
+ value |= BIT(SPDIF_0_OUT_DITHER_ENA);
+ writel(value, aio->cygaud->audio + SPDIF_CTRL_OFFSET);
+
+ /* Enable and set the FCI ID for the SPDIF channel */
+ value = readl(aio->cygaud->audio + SPDIF_STREAM_CFG_OFFSET);
+ value &= ~SPDIF_STREAM_CFG_MASK;
+ value |= aio->portnum; /* FCI ID is the port num */
+ value |= BIT(SPDIF_0_OUT_STREAM_ENA);
+ writel(value, aio->cygaud->audio + SPDIF_STREAM_CFG_OFFSET);
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value &= ~BIT(BF_SRC_CFGX_NOT_PAUSE_WHEN_EMPTY);
+ value |= BIT(BF_SRC_CFGX_SFIFO_SZ_DOUBLE);
+ value |= BIT(BF_SRC_CFGX_PROCESS_SEQ_ID_VALID);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+
+ /* Enable the spdif output pin */
+ value = readl(aio->cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+ value &= ~BIT(AUD_MISC_SEROUT_SPDIF_OE);
+ writel(value, aio->cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+ break;
+ default:
+ dev_err(aio->cygaud->dev, "Port not supported\n");
+ status = -EINVAL;
+ }
+
+ return status;
+}
+
+static void audio_ssp_in_enable(struct cygnus_aio_port *aio)
+{
+ u32 value;
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_destch_cfg);
+ value |= BIT(BF_DST_CFGX_CAP_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.bf_destch_cfg);
+
+ writel(0x1, aio->cygaud->audio + aio->regs.bf_destch_ctrl);
+
+ value = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ value |= BIT(I2S_OUT_CFGX_CLK_ENA);
+ value |= BIT(I2S_OUT_CFGX_DATA_ENABLE);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_cfg);
+
+ value = readl(aio->cygaud->i2s_in + aio->regs.i2s_cap_stream_cfg);
+ value |= BIT(I2S_IN_STREAM_CFG_CAP_ENA);
+ writel(value, aio->cygaud->i2s_in + aio->regs.i2s_cap_stream_cfg);
+
+ aio->streams_on |= CAPTURE_STREAM_MASK;
+}
+
+static void audio_ssp_in_disable(struct cygnus_aio_port *aio)
+{
+ u32 value;
+
+ value = readl(aio->cygaud->i2s_in + aio->regs.i2s_cap_stream_cfg);
+ value &= ~BIT(I2S_IN_STREAM_CFG_CAP_ENA);
+ writel(value, aio->cygaud->i2s_in + aio->regs.i2s_cap_stream_cfg);
+
+ aio->streams_on &= ~CAPTURE_STREAM_MASK;
+
+ /* If both playback and capture are off */
+ if (!aio->streams_on) {
+ value = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ value &= ~BIT(I2S_OUT_CFGX_CLK_ENA);
+ value &= ~BIT(I2S_OUT_CFGX_DATA_ENABLE);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_cfg);
+ }
+
+ writel(0x0, aio->cygaud->audio + aio->regs.bf_destch_ctrl);
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_destch_cfg);
+ value &= ~BIT(BF_DST_CFGX_CAP_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.bf_destch_cfg);
+}
+
+static int audio_ssp_out_enable(struct cygnus_aio_port *aio)
+{
+ u32 value;
+ int status = 0;
+
+ switch (aio->port_type) {
+ case PORT_TDM:
+ value = readl(aio->cygaud->audio + aio->regs.i2s_stream_cfg);
+ value |= BIT(I2S_OUT_STREAM_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_stream_cfg);
+
+ writel(1, aio->cygaud->audio + aio->regs.bf_sourcech_ctrl);
+
+ value = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ value |= BIT(I2S_OUT_CFGX_CLK_ENA);
+ value |= BIT(I2S_OUT_CFGX_DATA_ENABLE);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_cfg);
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value |= BIT(BF_SRC_CFGX_SFIFO_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+
+ aio->streams_on |= PLAYBACK_STREAM_MASK;
+ break;
+ case PORT_SPDIF:
+ value = readl(aio->cygaud->audio + SPDIF_FORMAT_CFG_OFFSET);
+ value |= 0x3;
+ writel(value, aio->cygaud->audio + SPDIF_FORMAT_CFG_OFFSET);
+
+ writel(1, aio->cygaud->audio + aio->regs.bf_sourcech_ctrl);
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value |= BIT(BF_SRC_CFGX_SFIFO_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ break;
+ default:
+ dev_err(aio->cygaud->dev,
+ "Port not supported %d\n", aio->portnum);
+ status = -EINVAL;
+ }
+
+ return status;
+}
+
+static int audio_ssp_out_disable(struct cygnus_aio_port *aio)
+{
+ u32 value;
+ int status = 0;
+
+ switch (aio->port_type) {
+ case PORT_TDM:
+ aio->streams_on &= ~PLAYBACK_STREAM_MASK;
+
+ /* If both playback and capture are off */
+ if (!aio->streams_on) {
+ value = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ value &= ~BIT(I2S_OUT_CFGX_CLK_ENA);
+ value &= ~BIT(I2S_OUT_CFGX_DATA_ENABLE);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_cfg);
+ }
+
+ /* set group_sync_dis = 1 */
+ value = readl(aio->cygaud->audio + BF_SRC_GRP_SYNC_DIS_OFFSET);
+ value |= BIT(aio->portnum);
+ writel(value, aio->cygaud->audio + BF_SRC_GRP_SYNC_DIS_OFFSET);
+
+ writel(0, aio->cygaud->audio + aio->regs.bf_sourcech_ctrl);
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value &= ~BIT(BF_SRC_CFGX_SFIFO_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+
+ /* set group_sync_dis = 0 */
+ value = readl(aio->cygaud->audio + BF_SRC_GRP_SYNC_DIS_OFFSET);
+ value &= ~BIT(aio->portnum);
+ writel(value, aio->cygaud->audio + BF_SRC_GRP_SYNC_DIS_OFFSET);
+
+ value = readl(aio->cygaud->audio + aio->regs.i2s_stream_cfg);
+ value &= ~BIT(I2S_OUT_STREAM_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_stream_cfg);
+
+ /* Pulse IOP SW INIT (set, then clear) to reset OUT_I2S_x */
+ value = readl(aio->cygaud->i2s_in + IOP_SW_INIT_LOGIC);
+ value |= BIT(aio->portnum);
+ writel(value, aio->cygaud->i2s_in + IOP_SW_INIT_LOGIC);
+ value &= ~BIT(aio->portnum);
+ writel(value, aio->cygaud->i2s_in + IOP_SW_INIT_LOGIC);
+ break;
+ case PORT_SPDIF:
+ value = readl(aio->cygaud->audio + SPDIF_FORMAT_CFG_OFFSET);
+ value &= ~0x3;
+ writel(value, aio->cygaud->audio + SPDIF_FORMAT_CFG_OFFSET);
+ writel(0, aio->cygaud->audio + aio->regs.bf_sourcech_ctrl);
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value &= ~BIT(BF_SRC_CFGX_SFIFO_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ break;
+ default:
+ dev_err(aio->cygaud->dev,
+ "Port not supported %d\n", aio->portnum);
+ status = -EINVAL;
+ }
+
+ return status;
+}
+
+static int pll_configure_mclk(struct cygnus_audio *cygaud, u32 mclk,
+ struct cygnus_aio_port *aio)
+{
+ int i = 0, error;
+ bool found = false;
+ const struct pll_macro_entry *p_entry;
+ struct clk *ch_clk;
+
+ for (i = 0; i < ARRAY_SIZE(pll_predef_mclk); i++) {
+ p_entry = &pll_predef_mclk[i];
+ if (p_entry->mclk == mclk) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ dev_err(cygaud->dev,
+ "%s No valid mclk freq (%u) found!\n", __func__, mclk);
+ return -EINVAL;
+ }
+
+ ch_clk = cygaud->audio_clk[p_entry->pll_ch_num];
+
+ if ((aio->clk_trace.cap_en) && (!aio->clk_trace.cap_clk_en)) {
+ error = clk_prepare_enable(ch_clk);
+ if (error) {
+ dev_err(cygaud->dev, "%s clk_prepare_enable failed %d\n",
+ __func__, error);
+ return error;
+ }
+ aio->clk_trace.cap_clk_en = true;
+ }
+
+ if ((aio->clk_trace.play_en) && (!aio->clk_trace.play_clk_en)) {
+ error = clk_prepare_enable(ch_clk);
+ if (error) {
+ dev_err(cygaud->dev, "%s clk_prepare_enable failed %d\n",
+ __func__, error);
+ return error;
+ }
+ aio->clk_trace.play_clk_en = true;
+ }
+
+ error = clk_set_rate(ch_clk, mclk);
+ if (error) {
+ dev_err(cygaud->dev, "%s Set MCLK rate failed: %d\n",
+ __func__, error);
+ return error;
+ }
+
+ return p_entry->pll_ch_num;
+}
+
+static int cygnus_ssp_set_clocks(struct cygnus_aio_port *aio,
+ struct cygnus_audio *cygaud)
+{
+ u32 value, i = 0;
+ u32 mask = 0xf;
+ u32 sclk;
+ bool found = false;
+ const struct _ssp_clk_coeff *p_entry = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(ssp_clk_coeff); i++) {
+ p_entry = &ssp_clk_coeff[i];
+ if ((p_entry->rate == aio->lrclk) &&
+ (p_entry->sclk_rate == aio->bit_per_frame) &&
+ (p_entry->mclk == aio->mclk)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ dev_err(aio->cygaud->dev,
+ "No valid match found in ssp_clk_coeff array\n");
+ dev_err(aio->cygaud->dev, "lrclk = %u, bits/frame = %u, mclk = %u\n",
+ aio->lrclk, aio->bit_per_frame, aio->mclk);
+ return -EINVAL;
+ }
+
+ sclk = aio->bit_per_frame;
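+ /*
+ * The divider field is 4 bits wide and holds sclks-per-frame / 32;
+ * 512 sclks per frame (512/32 = 16) does not fit and is encoded as 0.
+ */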
+ if (sclk == 512)
+ sclk = 0;
+ /* sclks_per_1fs_div = sclk cycles/32 */
+ sclk /= 32;
+ /* Set sclk rate */
+ switch (aio->port_type) {
+ case PORT_TDM:
+ /* Set number of bitclks per frame */
+ value = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ value &= ~(mask << I2S_OUT_CFGX_SCLKS_PER_1FS_DIV32);
+ value |= sclk << I2S_OUT_CFGX_SCLKS_PER_1FS_DIV32;
+ writel(value, aio->cygaud->audio + aio->regs.i2s_cfg);
+ dev_dbg(aio->cygaud->dev,
+ "SCLKS_PER_1FS_DIV32 = 0x%x\n", value);
+ break;
+ case PORT_SPDIF:
+ break;
+ default:
+ dev_err(aio->cygaud->dev, "Unknown port type\n");
+ return -EINVAL;
+ }
+
+ /* Set MCLK_RATE for this port (spdif and ssp use the same register layout) */
+ value = readl(aio->cygaud->audio + aio->regs.i2s_mclk_cfg);
+ value &= ~(0xf << I2S_OUT_MCLKRATE_SHIFT);
+ value |= (p_entry->mclk_rate << I2S_OUT_MCLKRATE_SHIFT);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_mclk_cfg);
+
+ dev_dbg(aio->cygaud->dev, "mclk cfg reg = 0x%x\n", value);
+ dev_dbg(aio->cygaud->dev, "bits per frame = %u, mclk = %u Hz, lrclk = %u Hz\n",
+ aio->bit_per_frame, aio->mclk, aio->lrclk);
+ return 0;
+}
+
+static int cygnus_ssp_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(dai);
+ struct cygnus_audio *cygaud = snd_soc_dai_get_drvdata(dai);
+ int rate, bitres;
+ u32 value;
+ u32 mask = 0x1f;
+ int ret = 0;
+
+ dev_dbg(aio->cygaud->dev, "%s port = %d\n", __func__, aio->portnum);
+ dev_dbg(aio->cygaud->dev, "params_channels %d\n",
+ params_channels(params));
+ dev_dbg(aio->cygaud->dev, "rate %d\n", params_rate(params));
+ dev_dbg(aio->cygaud->dev, "format %d\n", params_format(params));
+
+ rate = params_rate(params);
+
+ switch (aio->mode) {
+ case CYGNUS_SSPMODE_TDM:
+ if ((rate == 192000) && (params_channels(params) > 4)) {
+ dev_err(aio->cygaud->dev, "Cannot run %d channels at %dHz\n",
+ params_channels(params), rate);
+ return -EINVAL;
+ }
+ break;
+ case CYGNUS_SSPMODE_I2S:
+ aio->bit_per_frame = 64; /* I2S must be 64 bits per frame */
+ break;
+ default:
+ dev_err(aio->cygaud->dev,
+ "%s port running in unknown mode\n", __func__);
+ return -EINVAL;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value &= ~BIT(BF_SRC_CFGX_BUFFER_PAIR_ENABLE);
+ /* Configure channels as mono or stereo/TDM */
+ if (params_channels(params) == 1)
+ value |= BIT(BF_SRC_CFGX_SAMPLE_CH_MODE);
+ else
+ value &= ~BIT(BF_SRC_CFGX_SAMPLE_CH_MODE);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S8:
+ if (aio->port_type == PORT_SPDIF) {
+ dev_err(aio->cygaud->dev,
+ "SPDIF does not support 8bit format\n");
+ return -EINVAL;
+ }
+ bitres = 8;
+ break;
+
+ case SNDRV_PCM_FORMAT_S16_LE:
+ bitres = 16;
+ break;
+
+ case SNDRV_PCM_FORMAT_S32_LE:
+ /* 32 bit mode is coded as 0 */
+ bitres = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value &= ~(mask << BF_SRC_CFGX_BIT_RES);
+ value |= (bitres << BF_SRC_CFGX_BIT_RES);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+
+ } else {
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ value = readl(aio->cygaud->audio +
+ aio->regs.bf_destch_cfg);
+ value |= BIT(BF_DST_CFGX_CAP_MODE);
+ writel(value, aio->cygaud->audio +
+ aio->regs.bf_destch_cfg);
+ break;
+
+ case SNDRV_PCM_FORMAT_S32_LE:
+ value = readl(aio->cygaud->audio +
+ aio->regs.bf_destch_cfg);
+ value &= ~BIT(BF_DST_CFGX_CAP_MODE);
+ writel(value, aio->cygaud->audio +
+ aio->regs.bf_destch_cfg);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ aio->lrclk = rate;
+
+ if (!aio->is_slave)
+ ret = cygnus_ssp_set_clocks(aio, cygaud);
+
+ return ret;
+}
+
+/*
+ * Set the MCLK frequency and select the PLL channel that supplies it.
+ */
+static int cygnus_ssp_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ int sel;
+ u32 value;
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(dai);
+ struct cygnus_audio *cygaud = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(aio->cygaud->dev,
+ "%s Enter port = %d\n", __func__, aio->portnum);
+ sel = pll_configure_mclk(cygaud, freq, aio);
+ if (sel < 0) {
+ dev_err(aio->cygaud->dev,
+ "%s Setting mclk failed.\n", __func__);
+ return -EINVAL;
+ }
+
+ aio->mclk = freq;
+
+ dev_dbg(aio->cygaud->dev, "%s Setting MCLKSEL to %d\n", __func__, sel);
+ value = readl(aio->cygaud->audio + aio->regs.i2s_mclk_cfg);
+ value &= ~(0xf << I2S_OUT_PLLCLKSEL_SHIFT);
+ value |= (sel << I2S_OUT_PLLCLKSEL_SHIFT);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_mclk_cfg);
+
+ return 0;
+}
+
+static int cygnus_ssp_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(dai);
+
+ snd_soc_dai_set_dma_data(dai, substream, aio);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ aio->clk_trace.play_en = true;
+ else
+ aio->clk_trace.cap_en = true;
+
+ return 0;
+}
+
+static void cygnus_ssp_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(dai);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ aio->clk_trace.play_en = false;
+ else
+ aio->clk_trace.cap_en = false;
+
+ if (!aio->is_slave) {
+ u32 val;
+
+ val = readl(aio->cygaud->audio + aio->regs.i2s_mclk_cfg);
+ val &= CYGNUS_PLLCLKSEL_MASK;
+ if (val >= ARRAY_SIZE(aio->cygaud->audio_clk)) {
+ dev_err(aio->cygaud->dev, "Clk index %u is out of bounds\n",
+ val);
+ return;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (aio->clk_trace.play_clk_en) {
+ clk_disable_unprepare(aio->cygaud->
+ audio_clk[val]);
+ aio->clk_trace.play_clk_en = false;
+ }
+ } else {
+ if (aio->clk_trace.cap_clk_en) {
+ clk_disable_unprepare(aio->cygaud->
+ audio_clk[val]);
+ aio->clk_trace.cap_clk_en = false;
+ }
+ }
+ }
+}
+
+/*
+ * Bit Update Notes
+ * 31 Yes TDM Mode (1 = TDM, 0 = i2s)
+ * 30 Yes Slave Mode (1 = Slave, 0 = Master)
+ * 29:26 No Sclks per frame
+ * 25:18 Yes FS Width
+ * 17:14 No Valid Slots
+ * 13 No Bits (1 = 16 bits, 0 = 32 bits)
+ * 12:08 No Bits per samp
+ * 07 Yes Justification (1 = LSB, 0 = MSB)
+ * 06 Yes Alignment (1 = Delay 1 clk, 0 = no delay)
+ * 05 Yes SCLK polarity (1 = Rising, 0 = Falling)
+ * 04 Yes LRCLK Polarity (1 = High for left, 0 = Low for left)
+ * 03:02 Yes Reserved - write as zero
+ * 01 No Data Enable
+ * 00 No CLK Enable
+ */
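+/* Keeps the 'No' fields above (bits 29:26, 17:14, 13, 12:08, 01:00) on update */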
+#define I2S_OUT_CFG_REG_UPDATE_MASK 0x3C03FF03
+
+/* Input cfg is same as output, but the FS width is not a valid field */
+#define I2S_IN_CFG_REG_UPDATE_MASK (I2S_OUT_CFG_REG_UPDATE_MASK | 0x03FC0000)
+
+int cygnus_ssp_set_custom_fsync_width(struct snd_soc_dai *cpu_dai, int len)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(cpu_dai);
+
+ if (len <= 0 || len >= 256)
+ return -EINVAL;
+
+ aio->fsync_width = len;
+ return 0;
+}
+
+static int cygnus_ssp_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(cpu_dai);
+ u32 ssp_curcfg;
+ u32 ssp_newcfg;
+ u32 ssp_outcfg;
+ u32 ssp_incfg;
+ u32 val;
+ u32 mask;
+
+ dev_dbg(aio->cygaud->dev, "%s Enter fmt: %x\n", __func__, fmt);
+
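+ /* The DAI format bits have no meaning for the SPDIF port */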
+ if (aio->port_type == PORT_SPDIF)
+ return -EINVAL;
+
+ ssp_newcfg = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ ssp_newcfg |= BIT(I2S_OUT_CFGX_SLAVE_MODE);
+ aio->is_slave = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ ssp_newcfg &= ~BIT(I2S_OUT_CFGX_SLAVE_MODE);
+ aio->is_slave = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ ssp_newcfg |= BIT(I2S_OUT_CFGX_DATA_ALIGNMENT);
+ ssp_newcfg |= BIT(I2S_OUT_CFGX_FSYNC_WIDTH);
+ aio->mode = CYGNUS_SSPMODE_I2S;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ ssp_newcfg |= BIT(I2S_OUT_CFGX_TDM_MODE);
+
+ /* DSP_A = data after FS, DSP_B = data during FS */
+ if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_DSP_A)
+ ssp_newcfg |= BIT(I2S_OUT_CFGX_DATA_ALIGNMENT);
+
+ if ((aio->fsync_width > 0) && (aio->fsync_width < 256))
+ ssp_newcfg |=
+ (aio->fsync_width << I2S_OUT_CFGX_FSYNC_WIDTH);
+ else
+ ssp_newcfg |= BIT(I2S_OUT_CFGX_FSYNC_WIDTH);
+
+ aio->mode = CYGNUS_SSPMODE_TDM;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * SSP out cfg.
+ * Retain bits we do not want to update, then OR in new bits
+ */
+ ssp_curcfg = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ ssp_outcfg = (ssp_curcfg & I2S_OUT_CFG_REG_UPDATE_MASK) | ssp_newcfg;
+ writel(ssp_outcfg, aio->cygaud->audio + aio->regs.i2s_cfg);
+
+ /*
+ * SSP in cfg.
+ * Retain bits we do not want to update, then OR in new bits
+ */
+ ssp_curcfg = readl(aio->cygaud->i2s_in + aio->regs.i2s_cap_cfg);
+ ssp_incfg = (ssp_curcfg & I2S_IN_CFG_REG_UPDATE_MASK) | ssp_newcfg;
+ writel(ssp_incfg, aio->cygaud->i2s_in + aio->regs.i2s_cap_cfg);
+
+ val = readl(aio->cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+
+ /*
+ * Configure the word clk and bit clk as output or tristate
+ * Each port has 4 bits for controlling its pins.
+ * Shift the mask based upon port number.
+ */
+ mask = BIT(AUD_MISC_SEROUT_LRCK_OE)
+ | BIT(AUD_MISC_SEROUT_SCLK_OE)
+ | BIT(AUD_MISC_SEROUT_MCLK_OE);
+ mask = mask << (aio->portnum * 4);
+ if (aio->is_slave)
+ /* Set bit for tri-state */
+ val |= mask;
+ else
+ /* Clear bit for drive */
+ val &= ~mask;
+
+ dev_dbg(aio->cygaud->dev, "%s Set OE bits 0x%x\n", __func__, val);
+ writel(val, aio->cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+
+ return 0;
+}
+
+static int cygnus_ssp_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(dai);
+ struct cygnus_audio *cygaud = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(aio->cygaud->dev,
+ "%s cmd %d at port = %d\n", __func__, cmd, aio->portnum);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ audio_ssp_out_enable(aio);
+ else
+ audio_ssp_in_enable(aio);
+ cygaud->active_ports++;
+
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ audio_ssp_out_disable(aio);
+ else
+ audio_ssp_in_disable(aio);
+ cygaud->active_ports--;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cygnus_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai,
+ unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(cpu_dai);
+ u32 value;
+ int bits_per_slot = 0; /* default to 32-bits per slot */
+ int frame_bits;
+ unsigned int active_slots;
+ bool found = false;
+ int i;
+
+ if (tx_mask != rx_mask) {
+ dev_err(aio->cygaud->dev,
+ "%s tx_mask must equal rx_mask\n", __func__);
+ return -EINVAL;
+ }
+
+ active_slots = hweight32(tx_mask);
+
+ if (active_slots > 16)
+ return -EINVAL;
+
+ /* Slot value must be even */
+ if (active_slots % 2)
+ return -EINVAL;
+
+ /* We encode 16 slots as 0 in the reg */
+ if (active_slots == 16)
+ active_slots = 0;
+
+ /* Slot Width is either 16 or 32 */
+ switch (slot_width) {
+ case 16:
+ bits_per_slot = 1;
+ break;
+ case 32:
+ bits_per_slot = 0;
+ break;
+ default:
+ bits_per_slot = 0;
+ dev_warn(aio->cygaud->dev,
+ "%s Defaulting Slot Width to 32\n", __func__);
+ }
+
+ frame_bits = slots * slot_width;
+
+ for (i = 0; i < ARRAY_SIZE(ssp_valid_tdm_framesize); i++) {
+ if (ssp_valid_tdm_framesize[i] == frame_bits) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(aio->cygaud->dev,
+ "%s In TDM mode, frame bits INVALID (%d)\n",
+ __func__, frame_bits);
+ return -EINVAL;
+ }
+
+ aio->bit_per_frame = frame_bits;
+
+ dev_dbg(aio->cygaud->dev, "%s active_slots %u, bits per frame %d\n",
+ __func__, active_slots, frame_bits);
+
+ /* Set capture side of ssp port */
+ value = readl(aio->cygaud->i2s_in + aio->regs.i2s_cap_cfg);
+ value &= ~(0xf << I2S_OUT_CFGX_VALID_SLOT);
+ value |= (active_slots << I2S_OUT_CFGX_VALID_SLOT);
+ value &= ~BIT(I2S_OUT_CFGX_BITS_PER_SLOT);
+ value |= (bits_per_slot << I2S_OUT_CFGX_BITS_PER_SLOT);
+ writel(value, aio->cygaud->i2s_in + aio->regs.i2s_cap_cfg);
+
+ /* Set playback side of ssp port */
+ value = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ value &= ~(0xf << I2S_OUT_CFGX_VALID_SLOT);
+ value |= (active_slots << I2S_OUT_CFGX_VALID_SLOT);
+ value &= ~BIT(I2S_OUT_CFGX_BITS_PER_SLOT);
+ value |= (bits_per_slot << I2S_OUT_CFGX_BITS_PER_SLOT);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_cfg);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cygnus_ssp_suspend(struct snd_soc_dai *cpu_dai)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(cpu_dai);
+
+ if (!aio->is_slave) {
+ u32 val;
+
+ val = readl(aio->cygaud->audio + aio->regs.i2s_mclk_cfg);
+ val &= CYGNUS_PLLCLKSEL_MASK;
+ if (val >= ARRAY_SIZE(aio->cygaud->audio_clk)) {
+ dev_err(aio->cygaud->dev, "Clk index %u is out of bounds\n",
+ val);
+ return -EINVAL;
+ }
+
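+ /* The channel clock was prepared once per active direction; balance each enable */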
+ if (aio->clk_trace.cap_clk_en)
+ clk_disable_unprepare(aio->cygaud->audio_clk[val]);
+ if (aio->clk_trace.play_clk_en)
+ clk_disable_unprepare(aio->cygaud->audio_clk[val]);
+
+ aio->pll_clk_num = val;
+ }
+
+ return 0;
+}
+
+static int cygnus_ssp_resume(struct snd_soc_dai *cpu_dai)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(cpu_dai);
+ int error;
+
+ if (!aio->is_slave) {
+ if (aio->clk_trace.cap_clk_en) {
+ error = clk_prepare_enable(aio->cygaud->
+ audio_clk[aio->pll_clk_num]);
+ if (error) {
+ dev_err(aio->cygaud->dev, "%s clk_prepare_enable failed\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+ if (aio->clk_trace.play_clk_en) {
+ error = clk_prepare_enable(aio->cygaud->
+ audio_clk[aio->pll_clk_num]);
+ if (error) {
+ if (aio->clk_trace.cap_clk_en)
+ clk_disable_unprepare(aio->cygaud->
+ audio_clk[aio->pll_clk_num]);
+ dev_err(aio->cygaud->dev, "%s clk_prepare_enable failed\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+#else
+#define cygnus_ssp_suspend NULL
+#define cygnus_ssp_resume NULL
+#endif
+
+static const struct snd_soc_dai_ops cygnus_ssp_dai_ops = {
+ .startup = cygnus_ssp_startup,
+ .shutdown = cygnus_ssp_shutdown,
+ .trigger = cygnus_ssp_trigger,
+ .hw_params = cygnus_ssp_hw_params,
+ .set_fmt = cygnus_ssp_set_fmt,
+ .set_sysclk = cygnus_ssp_set_sysclk,
+ .set_tdm_slot = cygnus_set_dai_tdm_slot,
+};
+
+
+#define INIT_CPU_DAI(num) { \
+ .name = "cygnus-ssp" #num, \
+ .playback = { \
+ .channels_min = 1, \
+ .channels_max = 16, \
+ .rates = CYGNUS_TDM_RATE | SNDRV_PCM_RATE_88200 | \
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 | \
+ SNDRV_PCM_RATE_192000, \
+ .formats = SNDRV_PCM_FMTBIT_S8 | \
+ SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE, \
+ }, \
+ .capture = { \
+ .channels_min = 2, \
+ .channels_max = 16, \
+ .rates = CYGNUS_TDM_RATE | SNDRV_PCM_RATE_88200 | \
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 | \
+ SNDRV_PCM_RATE_192000, \
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE, \
+ }, \
+ .ops = &cygnus_ssp_dai_ops, \
+ .suspend = cygnus_ssp_suspend, \
+ .resume = cygnus_ssp_resume, \
+}
+
+static const struct snd_soc_dai_driver cygnus_ssp_dai_info[] = {
+ INIT_CPU_DAI(0),
+ INIT_CPU_DAI(1),
+ INIT_CPU_DAI(2),
+};
+
+static struct snd_soc_dai_driver cygnus_spdif_dai_info = {
+ .name = "cygnus-spdif",
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = CYGNUS_TDM_RATE | SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .ops = &cygnus_ssp_dai_ops,
+ .suspend = cygnus_ssp_suspend,
+ .resume = cygnus_ssp_resume,
+};
+
+static struct snd_soc_dai_driver cygnus_ssp_dai[CYGNUS_MAX_PORTS];
+
+static const struct snd_soc_component_driver cygnus_ssp_component = {
+ .name = "cygnus-audio",
+};
+
+/*
+ * Parse one SSP child node.
+ * Return < 0 on error, 0 if the node was parsed successfully.
+ */
+static int parse_ssp_child_node(struct platform_device *pdev,
+ struct device_node *dn,
+ struct cygnus_audio *cygaud,
+ struct snd_soc_dai_driver *p_dai)
+{
+ struct cygnus_aio_port *aio;
+ struct cygnus_ssp_regs ssp_regs[3];
+ u32 rawval;
+ int portnum = -1;
+ enum cygnus_audio_port_type port_type;
+
+ if (of_property_read_u32(dn, "reg", &rawval)) {
+ dev_err(&pdev->dev, "Missing reg property\n");
+ return -EINVAL;
+ }
+
+ portnum = rawval;
+ switch (rawval) {
+ case 0:
+ ssp_regs[0] = INIT_SSP_REGS(0);
+ port_type = PORT_TDM;
+ break;
+ case 1:
+ ssp_regs[1] = INIT_SSP_REGS(1);
+ port_type = PORT_TDM;
+ break;
+ case 2:
+ ssp_regs[2] = INIT_SSP_REGS(2);
+ port_type = PORT_TDM;
+ break;
+ case 3:
+ port_type = PORT_SPDIF;
+ break;
+ default:
+ dev_err(&pdev->dev, "Bad value for reg %u\n", rawval);
+ return -EINVAL;
+ }
+
+ aio = &cygaud->portinfo[portnum];
+ aio->cygaud = cygaud;
+ aio->portnum = portnum;
+ aio->port_type = port_type;
+ aio->fsync_width = -1;
+
+ switch (port_type) {
+ case PORT_TDM:
+ aio->regs = ssp_regs[portnum];
+ *p_dai = cygnus_ssp_dai_info[portnum];
+ aio->mode = CYGNUS_SSPMODE_UNKNOWN;
+ break;
+
+ case PORT_SPDIF:
+ aio->regs.bf_sourcech_cfg = BF_SRC_CFG3_OFFSET;
+ aio->regs.bf_sourcech_ctrl = BF_SRC_CTRL3_OFFSET;
+ aio->regs.i2s_mclk_cfg = SPDIF_MCLK_CFG_OFFSET;
+ aio->regs.i2s_stream_cfg = SPDIF_STREAM_CFG_OFFSET;
+ *p_dai = cygnus_spdif_dai_info;
+
+ /* For the purposes of this code SPDIF can be I2S mode */
+ aio->mode = CYGNUS_SSPMODE_I2S;
+ break;
+ default:
+ dev_err(&pdev->dev, "Bad value for port_type %d\n", port_type);
+ return -EINVAL;
+ }
+
+ dev_dbg(&pdev->dev, "%s portnum = %d\n", __func__, aio->portnum);
+ aio->streams_on = 0;
+ aio->cygaud->dev = &pdev->dev;
+ aio->clk_trace.play_en = false;
+ aio->clk_trace.cap_en = false;
+
+ audio_ssp_init_portregs(aio);
+ return 0;
+}
+
+static int audio_clk_init(struct platform_device *pdev,
+ struct cygnus_audio *cygaud)
+{
+ int i;
+ char clk_name[PROP_LEN_MAX];
+
+ for (i = 0; i < ARRAY_SIZE(cygaud->audio_clk); i++) {
+ snprintf(clk_name, PROP_LEN_MAX, "ch%d_audio", i);
+
+ cygaud->audio_clk[i] = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(cygaud->audio_clk[i]))
+ return PTR_ERR(cygaud->audio_clk[i]);
+ }
+
+ return 0;
+}
+
+static int cygnus_ssp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *child_node;
+ struct resource *res = pdev->resource;
+ struct cygnus_audio *cygaud;
+ int err = -EINVAL;
+ int node_count;
+ int active_port_count;
+
+ cygaud = devm_kzalloc(dev, sizeof(struct cygnus_audio), GFP_KERNEL);
+ if (!cygaud)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, cygaud);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud");
+ cygaud->audio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cygaud->audio))
+ return PTR_ERR(cygaud->audio);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "i2s_in");
+ cygaud->i2s_in = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cygaud->i2s_in))
+ return PTR_ERR(cygaud->i2s_in);
+
+ /* Tri-state all controllable pins until we know we need them */
+ writel(CYGNUS_SSP_TRISTATE_MASK,
+ cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+
+ node_count = of_get_child_count(pdev->dev.of_node);
+ if ((node_count < 1) || (node_count > CYGNUS_MAX_PORTS)) {
+ dev_err(dev, "child nodes is %d. Must be between 1 and %d\n",
+ node_count, CYGNUS_MAX_PORTS);
+ return -EINVAL;
+ }
+
+ active_port_count = 0;
+
+ for_each_available_child_of_node(pdev->dev.of_node, child_node) {
+ err = parse_ssp_child_node(pdev, child_node, cygaud,
+ &cygnus_ssp_dai[active_port_count]);
+
+ /* parse_ssp_child_node() returns 0 on success, negative on error */
+ if (err < 0)
+ return err;
+
+ dev_dbg(dev, "Activating DAI: %s\n",
+ cygnus_ssp_dai[active_port_count].name);
+ active_port_count++;
+ }
+
+ cygaud->dev = dev;
+ cygaud->active_ports = 0;
+
+ dev_dbg(dev, "Registering %d DAIs\n", active_port_count);
+ err = snd_soc_register_component(dev, &cygnus_ssp_component,
+ cygnus_ssp_dai, active_port_count);
+ if (err) {
+ dev_err(dev, "snd_soc_register_dai failed\n");
+ return err;
+ }
+
+ cygaud->irq_num = platform_get_irq(pdev, 0);
+ if (cygaud->irq_num <= 0) {
+ dev_err(dev, "platform_get_irq failed\n");
+ err = cygaud->irq_num ? cygaud->irq_num : -ENODEV;
+ goto err_irq;
+ }
+
+ err = audio_clk_init(pdev, cygaud);
+ if (err) {
+ dev_err(dev, "audio clock initialization failed\n");
+ goto err_irq;
+ }
+
+ err = cygnus_soc_platform_register(dev, cygaud);
+ if (err) {
+ dev_err(dev, "platform reg error %d\n", err);
+ goto err_irq;
+ }
+
+ return 0;
+
+err_irq:
+ snd_soc_unregister_component(dev);
+ return err;
+}
+
+static int cygnus_ssp_remove(struct platform_device *pdev)
+{
+ cygnus_soc_platform_unregister(&pdev->dev);
+ snd_soc_unregister_component(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id cygnus_ssp_of_match[] = {
+ { .compatible = "brcm,cygnus-audio" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cygnus_ssp_of_match);
+
+static struct platform_driver cygnus_ssp_driver = {
+ .probe = cygnus_ssp_probe,
+ .remove = cygnus_ssp_remove,
+ .driver = {
+ .name = "cygnus-ssp",
+ .of_match_table = cygnus_ssp_of_match,
+ },
+};
+
+module_platform_driver(cygnus_ssp_driver);
+
+MODULE_ALIAS("platform:cygnus-ssp");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Cygnus ASoC SSP Interface");
--- /dev/null
+/*
+ * Copyright (C) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __CYGNUS_SSP_H__
+#define __CYGNUS_SSP_H__
+
+#define CYGNUS_TDM_DAI_MAX_SLOTS 16
+
+#define CYGNUS_MAX_PLAYBACK_PORTS 4
+#define CYGNUS_MAX_CAPTURE_PORTS 3
+#define CYGNUS_MAX_I2S_PORTS 3
+#define CYGNUS_MAX_PORTS CYGNUS_MAX_PLAYBACK_PORTS
+#define CYGNUS_AUIDO_MAX_NUM_CLKS 3
+
+#define CYGNUS_SSP_FRAMEBITS_DIV 1
+
+#define CYGNUS_SSPMODE_I2S 0
+#define CYGNUS_SSPMODE_TDM 1
+#define CYGNUS_SSPMODE_UNKNOWN -1
+
+#define CYGNUS_SSP_CLKSRC_PLL 0
+
+/* Max string length of our dt property names */
+#define PROP_LEN_MAX 40
+
+struct ringbuf_regs {
+ unsigned rdaddr;
+ unsigned wraddr;
+ unsigned baseaddr;
+ unsigned endaddr;
+ unsigned fmark; /* freemark for play, fullmark for capture */
+ unsigned period_bytes;
+ unsigned buf_size;
+};
+
+#define RINGBUF_REG_PLAYBACK(num) ((struct ringbuf_regs) { \
+ .rdaddr = SRC_RBUF_ ##num## _RDADDR_OFFSET, \
+ .wraddr = SRC_RBUF_ ##num## _WRADDR_OFFSET, \
+ .baseaddr = SRC_RBUF_ ##num## _BASEADDR_OFFSET, \
+ .endaddr = SRC_RBUF_ ##num## _ENDADDR_OFFSET, \
+ .fmark = SRC_RBUF_ ##num## _FREE_MARK_OFFSET, \
+ .period_bytes = 0, \
+ .buf_size = 0, \
+})
+
+#define RINGBUF_REG_CAPTURE(num) ((struct ringbuf_regs) { \
+ .rdaddr = DST_RBUF_ ##num## _RDADDR_OFFSET, \
+ .wraddr = DST_RBUF_ ##num## _WRADDR_OFFSET, \
+ .baseaddr = DST_RBUF_ ##num## _BASEADDR_OFFSET, \
+ .endaddr = DST_RBUF_ ##num## _ENDADDR_OFFSET, \
+ .fmark = DST_RBUF_ ##num## _FULL_MARK_OFFSET, \
+ .period_bytes = 0, \
+ .buf_size = 0, \
+})
+
+enum cygnus_audio_port_type {
+ PORT_TDM,
+ PORT_SPDIF,
+};
+
+struct cygnus_ssp_regs {
+ u32 i2s_stream_cfg;
+ u32 i2s_cfg;
+ u32 i2s_cap_stream_cfg;
+ u32 i2s_cap_cfg;
+ u32 i2s_mclk_cfg;
+
+ u32 bf_destch_ctrl;
+ u32 bf_destch_cfg;
+ u32 bf_sourcech_ctrl;
+ u32 bf_sourcech_cfg;
+ u32 bf_sourcech_grp;
+};
+
+struct cygnus_track_clk {
+ bool cap_en;
+ bool play_en;
+ bool cap_clk_en;
+ bool play_clk_en;
+};
+
+struct cygnus_aio_port {
+ int portnum;
+ int mode;
+ bool is_slave;
+ int streams_on; /* will be 0 if both capture and play are off */
+ int fsync_width;
+ int port_type;
+
+ u32 mclk;
+ u32 lrclk;
+ u32 bit_per_frame;
+ u32 pll_clk_num;
+
+ struct cygnus_audio *cygaud;
+ struct cygnus_ssp_regs regs;
+
+ struct ringbuf_regs play_rb_regs;
+ struct ringbuf_regs capture_rb_regs;
+
+ struct snd_pcm_substream *play_stream;
+ struct snd_pcm_substream *capture_stream;
+
+ struct cygnus_track_clk clk_trace;
+};
+
+
+struct cygnus_audio {
+ struct cygnus_aio_port portinfo[CYGNUS_MAX_PORTS];
+
+ int irq_num;
+ void __iomem *audio;
+ struct device *dev;
+ void __iomem *i2s_in;
+
+ struct clk *audio_clk[CYGNUS_AUIDO_MAX_NUM_CLKS];
+ int active_ports;
+ unsigned long vco_rate;
+};
+
+extern int cygnus_ssp_get_mode(struct snd_soc_dai *cpu_dai);
+extern int cygnus_ssp_add_pll_tweak_controls(struct snd_soc_pcm_runtime *rtd);
+extern int cygnus_ssp_set_custom_fsync_width(struct snd_soc_dai *cpu_dai,
+ int len);
+extern int cygnus_soc_platform_register(struct device *dev,
+ struct cygnus_audio *cygaud);
+extern int cygnus_soc_platform_unregister(struct device *dev);
+#endif
select SND_SOC_ADAU1977_SPI if SPI_MASTER
select SND_SOC_ADAU1977_I2C if I2C
select SND_SOC_ADAU1701 if I2C
+ select SND_SOC_ADAU7002
select SND_SOC_ADS117X
select SND_SOC_AK4104 if SPI_MASTER
select SND_SOC_AK4535 if I2C
select SND_SOC_BT_SCO
select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
select SND_SOC_CS35L32 if I2C
+ select SND_SOC_CS35L33 if I2C
select SND_SOC_CS42L51_I2C if I2C
select SND_SOC_CS42L52 if I2C && INPUT
select SND_SOC_CS42L56 if I2C && INPUT
select SND_SOC_CS42XX8_I2C if I2C
select SND_SOC_CS4349 if I2C
select SND_SOC_CS47L24 if MFD_CS47L24
+ select SND_SOC_CS53L30 if I2C
select SND_SOC_CX20442 if TTY
select SND_SOC_DA7210 if SND_SOC_I2C_AND_SPI
select SND_SOC_DA7213 if I2C
select SND_SOC_MAX98925 if I2C
select SND_SOC_MAX98926 if I2C
select SND_SOC_MAX9850 if I2C
+ select SND_SOC_MAX9860 if I2C
select SND_SOC_MAX9768 if I2C
select SND_SOC_MAX9877 if I2C
select SND_SOC_MC13783 if MFD_MC13XXX
config SND_SOC_AD73311
tristate
+config SND_SOC_ADAU_UTILS
+ tristate
+
config SND_SOC_ADAU1373
tristate
+ select SND_SOC_ADAU_UTILS
config SND_SOC_ADAU1701
tristate "Analog Devices ADAU1701 CODEC"
config SND_SOC_ADAU17X1
tristate
select SND_SOC_SIGMADSP_REGMAP
+ select SND_SOC_ADAU_UTILS
config SND_SOC_ADAU1761
tristate
select SND_SOC_ADAU1977
select REGMAP_I2C
+config SND_SOC_ADAU7002
+ tristate "Analog Devices ADAU7002 Stereo PDM-to-I2S/TDM Converter"
+
config SND_SOC_ADAV80X
tristate
tristate
config SND_SOC_BT_SCO
- tristate
+ tristate "Dummy BT SCO codec driver"
config SND_SOC_CQ0093VC
tristate
tristate "Cirrus Logic CS35L32 CODEC"
depends on I2C
+config SND_SOC_CS35L33
+ tristate "Cirrus Logic CS35L33 CODEC"
+ depends on I2C
+
config SND_SOC_CS42L51
tristate
config SND_SOC_CS47L24
tristate
+# Cirrus Logic Quad-Channel ADC
+config SND_SOC_CS53L30
+ tristate "Cirrus Logic CS53L30 CODEC"
+ depends on I2C
+
config SND_SOC_CX20442
tristate
depends on TTY
tristate
config SND_SOC_HDMI_CODEC
- tristate
- select SND_PCM_ELD
- select SND_PCM_IEC958
+ tristate
+ select SND_PCM_ELD
+ select SND_PCM_IEC958
+ select HDMI
config SND_SOC_ES8328
tristate "Everest Semi ES8328 CODEC"
config SND_SOC_MAX98371
tristate
+config SND_SOC_MAX98504
+ tristate "Maxim MAX98504 speaker amplifier"
+ depends on I2C
+
config SND_SOC_MAX9867
tristate
config SND_SOC_MAX9850
tristate
+config SND_SOC_MAX9860
+ tristate "Maxim MAX9860 Mono Audio Voice Codec"
+ depends on I2C
+ select REGMAP_I2C
+
config SND_SOC_PCM1681
tristate "Texas Instruments PCM1681 CODEC"
depends on I2C
config SND_SOC_RT5514
tristate
+config SND_SOC_RT5514_SPI
+ tristate
+
config SND_SOC_RT5616
tristate "Realtek RT5616 CODEC"
depends on I2C
snd-soc-ad193x-i2c-objs := ad193x-i2c.o
snd-soc-ad1980-objs := ad1980.o
snd-soc-ad73311-objs := ad73311.o
+snd-soc-adau-utils-objs := adau-utils.o
snd-soc-adau1373-objs := adau1373.o
snd-soc-adau1701-objs := adau1701.o
snd-soc-adau17x1-objs := adau17x1.o
snd-soc-adau1977-objs := adau1977.o
snd-soc-adau1977-spi-objs := adau1977-spi.o
snd-soc-adau1977-i2c-objs := adau1977-i2c.o
+snd-soc-adau7002-objs := adau7002.o
snd-soc-adav80x-objs := adav80x.o
snd-soc-adav801-objs := adav801.o
snd-soc-adav803-objs := adav803.o
snd-soc-bt-sco-objs := bt-sco.o
snd-soc-cq93vc-objs := cq93vc.o
snd-soc-cs35l32-objs := cs35l32.o
+snd-soc-cs35l33-objs := cs35l33.o
snd-soc-cs42l51-objs := cs42l51.o
snd-soc-cs42l51-i2c-objs := cs42l51-i2c.o
snd-soc-cs42l52-objs := cs42l52.o
snd-soc-cs42xx8-i2c-objs := cs42xx8-i2c.o
snd-soc-cs4349-objs := cs4349.o
snd-soc-cs47l24-objs := cs47l24.o
+snd-soc-cs53l30-objs := cs53l30.o
snd-soc-cx20442-objs := cx20442.o
snd-soc-da7210-objs := da7210.o
snd-soc-da7213-objs := da7213.o
snd-soc-max98925-objs := max98925.o
snd-soc-max98926-objs := max98926.o
snd-soc-max9850-objs := max9850.o
+snd-soc-max9860-objs := max9860.o
snd-soc-mc13783-objs := mc13783.o
snd-soc-ml26124-objs := ml26124.o
snd-soc-nau8825-objs := nau8825.o
snd-soc-rt286-objs := rt286.o
snd-soc-rt298-objs := rt298.o
snd-soc-rt5514-objs := rt5514.o
+snd-soc-rt5514-spi-objs := rt5514-spi.o
snd-soc-rt5616-objs := rt5616.o
snd-soc-rt5631-objs := rt5631.o
snd-soc-rt5640-objs := rt5640.o
# Amp
snd-soc-max9877-objs := max9877.o
+snd-soc-max98504-objs := max98504.o
snd-soc-tpa6130a2-objs := tpa6130a2.o
snd-soc-tas2552-objs := tas2552.o
obj-$(CONFIG_SND_SOC_AD193X_I2C) += snd-soc-ad193x-i2c.o
obj-$(CONFIG_SND_SOC_AD1980) += snd-soc-ad1980.o
obj-$(CONFIG_SND_SOC_AD73311) += snd-soc-ad73311.o
+obj-$(CONFIG_SND_SOC_ADAU_UTILS) += snd-soc-adau-utils.o
obj-$(CONFIG_SND_SOC_ADAU1373) += snd-soc-adau1373.o
obj-$(CONFIG_SND_SOC_ADAU1701) += snd-soc-adau1701.o
obj-$(CONFIG_SND_SOC_ADAU17X1) += snd-soc-adau17x1.o
obj-$(CONFIG_SND_SOC_ADAU1977) += snd-soc-adau1977.o
obj-$(CONFIG_SND_SOC_ADAU1977_SPI) += snd-soc-adau1977-spi.o
obj-$(CONFIG_SND_SOC_ADAU1977_I2C) += snd-soc-adau1977-i2c.o
+obj-$(CONFIG_SND_SOC_ADAU7002) += snd-soc-adau7002.o
obj-$(CONFIG_SND_SOC_ADAV80X) += snd-soc-adav80x.o
obj-$(CONFIG_SND_SOC_ADAV801) += snd-soc-adav801.o
obj-$(CONFIG_SND_SOC_ADAV803) += snd-soc-adav803.o
obj-$(CONFIG_SND_SOC_BT_SCO) += snd-soc-bt-sco.o
obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
obj-$(CONFIG_SND_SOC_CS35L32) += snd-soc-cs35l32.o
+obj-$(CONFIG_SND_SOC_CS35L33) += snd-soc-cs35l33.o
obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o
obj-$(CONFIG_SND_SOC_CS42L51_I2C) += snd-soc-cs42l51-i2c.o
obj-$(CONFIG_SND_SOC_CS42L52) += snd-soc-cs42l52.o
obj-$(CONFIG_SND_SOC_CS42XX8_I2C) += snd-soc-cs42xx8-i2c.o
obj-$(CONFIG_SND_SOC_CS4349) += snd-soc-cs4349.o
obj-$(CONFIG_SND_SOC_CS47L24) += snd-soc-cs47l24.o
+obj-$(CONFIG_SND_SOC_CS53L30) += snd-soc-cs53l30.o
obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o
obj-$(CONFIG_SND_SOC_DA7210) += snd-soc-da7210.o
obj-$(CONFIG_SND_SOC_DA7213) += snd-soc-da7213.o
obj-$(CONFIG_SND_SOC_MAX98925) += snd-soc-max98925.o
obj-$(CONFIG_SND_SOC_MAX98926) += snd-soc-max98926.o
obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
+obj-$(CONFIG_SND_SOC_MAX9860) += snd-soc-max9860.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
obj-$(CONFIG_SND_SOC_ML26124) += snd-soc-ml26124.o
obj-$(CONFIG_SND_SOC_NAU8825) += snd-soc-nau8825.o
obj-$(CONFIG_SND_SOC_RT286) += snd-soc-rt286.o
obj-$(CONFIG_SND_SOC_RT298) += snd-soc-rt298.o
obj-$(CONFIG_SND_SOC_RT5514) += snd-soc-rt5514.o
+obj-$(CONFIG_SND_SOC_RT5514_SPI) += snd-soc-rt5514-spi.o
obj-$(CONFIG_SND_SOC_RT5616) += snd-soc-rt5616.o
obj-$(CONFIG_SND_SOC_RT5631) += snd-soc-rt5631.o
obj-$(CONFIG_SND_SOC_RT5640) += snd-soc-rt5640.o
# Amp
obj-$(CONFIG_SND_SOC_MAX9877) += snd-soc-max9877.o
+obj-$(CONFIG_SND_SOC_MAX98504) += snd-soc-max98504.o
obj-$(CONFIG_SND_SOC_TPA6130A2) += snd-soc-tpa6130a2.o
--- /dev/null
+/*
+ * Shared helper functions for devices from the ADAU family
+ *
+ * Copyright 2011-2016 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/gcd.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "adau-utils.h"
+
+int adau_calc_pll_cfg(unsigned int freq_in, unsigned int freq_out,
+ uint8_t regs[5])
+{
+ unsigned int r, n, m, i, j;
+ unsigned int div;
+
+ if (!freq_out) {
+ r = 0;
+ n = 0;
+ m = 0;
+ div = 0;
+ } else {
+ if (freq_out % freq_in != 0) {
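+ /*
+ * Fractional mode: fout = fin * (r + n/m) / x,
+ * where the input is pre-divided by x = div + 1.
+ */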
+ div = DIV_ROUND_UP(freq_in, 13500000);
+ freq_in /= div;
+ r = freq_out / freq_in;
+ i = freq_out % freq_in;
+ j = gcd(i, freq_in);
+ n = i / j;
+ m = freq_in / j;
+ div--;
+ } else {
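+ /* Integer mode: fout is an exact multiple, r = fout / fin */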
+ r = freq_out / freq_in;
+ n = 0;
+ m = 0;
+ div = 0;
+ }
+ if (n > 0xffff || m > 0xffff || div > 3 || r > 8 || r < 2)
+ return -EINVAL;
+ }
+
+ regs[0] = m >> 8;
+ regs[1] = m & 0xff;
+ regs[2] = n >> 8;
+ regs[3] = n & 0xff;
+ regs[4] = (r << 3) | (div << 1);
+ if (m != 0)
+ regs[4] |= 1; /* Fractional mode */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adau_calc_pll_cfg);
+
+MODULE_DESCRIPTION("ASoC ADAU audio CODECs shared helper functions");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+#ifndef SOUND_SOC_CODECS_ADAU_PLL_H
+#define SOUND_SOC_CODECS_ADAU_PLL_H
+
+int adau_calc_pll_cfg(unsigned int freq_in, unsigned int freq_out,
+ uint8_t regs[5]);
+
+#endif
#include <sound/adau1373.h>
#include "adau1373.h"
+#include "adau-utils.h"
struct adau1373_dai {
unsigned int clk_src;
{
struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
unsigned int dpll_div = 0;
- unsigned int x, r, n, m, i, j, mode;
+ uint8_t pll_regs[5];
+ int ret;
switch (pll_id) {
case ADAU1373_PLL1:
dpll_div++;
}
- if (freq_out % freq_in != 0) {
- /* fout = fin * (r + (n/m)) / x */
- x = DIV_ROUND_UP(freq_in, 13500000);
- freq_in /= x;
- r = freq_out / freq_in;
- i = freq_out % freq_in;
- j = gcd(i, freq_in);
- n = i / j;
- m = freq_in / j;
- x--;
- mode = 1;
- } else {
- /* fout = fin / r */
- r = freq_out / freq_in;
- n = 0;
- m = 0;
- x = 0;
- mode = 0;
- }
-
- if (r < 2 || r > 8 || x > 3 || m > 0xffff || n > 0xffff)
+ ret = adau_calc_pll_cfg(freq_in, freq_out, pll_regs);
+ if (ret)
return -EINVAL;
if (dpll_div) {
regmap_write(adau1373->regmap, ADAU1373_DPLL_CTRL(pll_id),
(source << 4) | dpll_div);
- regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL1(pll_id), (m >> 8) & 0xff);
- regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL2(pll_id), m & 0xff);
- regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL3(pll_id), (n >> 8) & 0xff);
- regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL4(pll_id), n & 0xff);
- regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL5(pll_id),
- (r << 3) | (x << 1) | mode);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL1(pll_id), pll_regs[0]);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL2(pll_id), pll_regs[1]);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL3(pll_id), pll_regs[2]);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL4(pll_id), pll_regs[3]);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL5(pll_id), pll_regs[4]);
/* Set sysclk to pll_rate / 4 */
regmap_update_bits(adau1373->regmap, ADAU1373_CLK_SRC_DIV(pll_id), 0x3f, 0x09);
static int adau1761_i2c_remove(struct i2c_client *client)
{
- snd_soc_unregister_codec(&client->dev);
+ adau17x1_remove(&client->dev);
return 0;
}
static int adau1761_spi_remove(struct spi_device *spi)
{
- snd_soc_unregister_codec(&spi->dev);
+ adau17x1_remove(&spi->dev);
return 0;
}
static int adau1781_i2c_remove(struct i2c_client *client)
{
- snd_soc_unregister_codec(&client->dev);
+ adau17x1_remove(&client->dev);
return 0;
}
static int adau1781_spi_remove(struct spi_device *spi)
{
- snd_soc_unregister_codec(&spi->dev);
+ adau17x1_remove(&spi->dev);
return 0;
}
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <sound/core.h>
#include "sigmadsp.h"
#include "adau17x1.h"
+#include "adau-utils.h"
static const char * const adau17x1_capture_mixer_boost_text[] = {
"Normal operation", "Boost Level 1", "Boost Level 2", "Boost Level 3",
}
EXPORT_SYMBOL_GPL(adau17x1_has_dsp);
+static int adau17x1_set_dai_pll(struct snd_soc_dai *dai, int pll_id,
+ int source, unsigned int freq_in, unsigned int freq_out)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct adau *adau = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ if (freq_in < 8000000 || freq_in > 27000000)
+ return -EINVAL;
+
+ ret = adau_calc_pll_cfg(freq_in, freq_out, adau->pll_regs);
+ if (ret < 0)
+ return ret;
+
+ /* The PLL register is 6 bytes long and must be written all at once. */
+ ret = regmap_raw_write(adau->regmap, ADAU17X1_PLL_CONTROL,
+ adau->pll_regs, ARRAY_SIZE(adau->pll_regs));
+ if (ret)
+ return ret;
+
+ adau->pll_freq = freq_out;
+
+ return 0;
+}
+
+static int adau17x1_set_dai_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(dai->codec);
+ struct adau *adau = snd_soc_codec_get_drvdata(dai->codec);
+ bool is_pll;
+ bool was_pll;
+
+ switch (clk_id) {
+ case ADAU17X1_CLK_SRC_MCLK:
+ is_pll = false;
+ break;
+ case ADAU17X1_CLK_SRC_PLL_AUTO:
+ if (!adau->mclk)
+ return -EINVAL;
+ /* Fall-through */
+ case ADAU17X1_CLK_SRC_PLL:
+ is_pll = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (adau->clk_src) {
+ case ADAU17X1_CLK_SRC_MCLK:
+ was_pll = false;
+ break;
+ case ADAU17X1_CLK_SRC_PLL:
+ case ADAU17X1_CLK_SRC_PLL_AUTO:
+ was_pll = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ adau->sysclk = freq;
+
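+ /*
+ * The route from the PLL to SYSCLK exists in the DAPM graph only
+ * while the PLL is the clock source; update it on a source change.
+ */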
+ if (is_pll != was_pll) {
+ if (is_pll) {
+ snd_soc_dapm_add_routes(dapm,
+ &adau17x1_dapm_pll_route, 1);
+ } else {
+ snd_soc_dapm_del_routes(dapm,
+ &adau17x1_dapm_pll_route, 1);
+ }
+ }
+
+ adau->clk_src = clk_id;
+
+ return 0;
+}
+
+static int adau17x1_auto_pll(struct snd_soc_dai *dai,
+ struct snd_pcm_hw_params *params)
+{
+ struct adau *adau = snd_soc_dai_get_drvdata(dai);
+ unsigned int pll_rate;
+
+ switch (params_rate(params)) {
+ case 48000:
+ case 8000:
+ case 12000:
+ case 16000:
+ case 24000:
+ case 32000:
+ case 96000:
+ pll_rate = 48000 * 1024;
+ break;
+ case 44100:
+ case 7350:
+ case 11025:
+ case 14700:
+ case 22050:
+ case 29400:
+ case 88200:
+ pll_rate = 44100 * 1024;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return adau17x1_set_dai_pll(dai, ADAU17X1_PLL, ADAU17X1_PLL_SRC_MCLK,
+ clk_get_rate(adau->mclk), pll_rate);
+}
+
static int adau17x1_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
unsigned int freq;
int ret;
- if (adau->clk_src == ADAU17X1_CLK_SRC_PLL)
+ switch (adau->clk_src) {
+ case ADAU17X1_CLK_SRC_PLL_AUTO:
+ ret = adau17x1_auto_pll(dai, params);
+ if (ret)
+ return ret;
+ /* Fall-through */
+ case ADAU17X1_CLK_SRC_PLL:
freq = adau->pll_freq;
- else
+ break;
+ default:
freq = adau->sysclk;
+ break;
+ }
if (freq % params_rate(params) != 0)
return -EINVAL;
ADAU17X1_SERIAL_PORT1_DELAY_MASK, val);
}
-static int adau17x1_set_dai_pll(struct snd_soc_dai *dai, int pll_id,
- int source, unsigned int freq_in, unsigned int freq_out)
-{
- struct snd_soc_codec *codec = dai->codec;
- struct adau *adau = snd_soc_codec_get_drvdata(codec);
- unsigned int r, n, m, i, j;
- unsigned int div;
- int ret;
-
- if (freq_in < 8000000 || freq_in > 27000000)
- return -EINVAL;
-
- if (!freq_out) {
- r = 0;
- n = 0;
- m = 0;
- div = 0;
- } else {
- if (freq_out % freq_in != 0) {
- div = DIV_ROUND_UP(freq_in, 13500000);
- freq_in /= div;
- r = freq_out / freq_in;
- i = freq_out % freq_in;
- j = gcd(i, freq_in);
- n = i / j;
- m = freq_in / j;
- div--;
- } else {
- r = freq_out / freq_in;
- n = 0;
- m = 0;
- div = 0;
- }
- if (n > 0xffff || m > 0xffff || div > 3 || r > 8 || r < 2)
- return -EINVAL;
- }
-
- adau->pll_regs[0] = m >> 8;
- adau->pll_regs[1] = m & 0xff;
- adau->pll_regs[2] = n >> 8;
- adau->pll_regs[3] = n & 0xff;
- adau->pll_regs[4] = (r << 3) | (div << 1);
- if (m != 0)
- adau->pll_regs[4] |= 1; /* Fractional mode */
-
- /* The PLL register is 6 bytes long and can only be written at once. */
- ret = regmap_raw_write(adau->regmap, ADAU17X1_PLL_CONTROL,
- adau->pll_regs, ARRAY_SIZE(adau->pll_regs));
- if (ret)
- return ret;
-
- adau->pll_freq = freq_out;
-
- return 0;
-}
-
-static int adau17x1_set_dai_sysclk(struct snd_soc_dai *dai,
- int clk_id, unsigned int freq, int dir)
-{
- struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(dai->codec);
- struct adau *adau = snd_soc_codec_get_drvdata(dai->codec);
-
- switch (clk_id) {
- case ADAU17X1_CLK_SRC_MCLK:
- case ADAU17X1_CLK_SRC_PLL:
- break;
- default:
- return -EINVAL;
- }
-
- adau->sysclk = freq;
-
- if (adau->clk_src != clk_id) {
- if (clk_id == ADAU17X1_CLK_SRC_PLL) {
- snd_soc_dapm_add_routes(dapm,
- &adau17x1_dapm_pll_route, 1);
- } else {
- snd_soc_dapm_del_routes(dapm,
- &adau17x1_dapm_pll_route, 1);
- }
- }
-
- adau->clk_src = clk_id;
-
- return 0;
-}
-
static int adau17x1_set_dai_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
ret = snd_soc_dapm_add_routes(dapm, adau17x1_no_dsp_dapm_routes,
ARRAY_SIZE(adau17x1_no_dsp_dapm_routes));
}
+
+ if (adau->clk_src != ADAU17X1_CLK_SRC_MCLK)
+ snd_soc_dapm_add_routes(dapm, &adau17x1_dapm_pll_route, 1);
+
return ret;
}
EXPORT_SYMBOL_GPL(adau17x1_add_routes);
const char *firmware_name)
{
struct adau *adau;
+ int ret;
if (IS_ERR(regmap))
return PTR_ERR(regmap);
if (!adau)
return -ENOMEM;
+ adau->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(adau->mclk)) {
+ if (PTR_ERR(adau->mclk) != -ENOENT)
+ return PTR_ERR(adau->mclk);
+ /* Clock is optional (for the driver) */
+ adau->mclk = NULL;
+ } else if (adau->mclk) {
+ adau->clk_src = ADAU17X1_CLK_SRC_PLL_AUTO;
+
+ /*
+ * Any valid PLL output rate will work at this point, use one
+ * that is likely to be chosen later as well. The register will
+ * be written when the PLL is powered up for the first time.
+ */
+ ret = adau_calc_pll_cfg(clk_get_rate(adau->mclk), 48000 * 1024,
+ adau->pll_regs);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(adau->mclk);
+ if (ret)
+ return ret;
+ }
+
adau->regmap = regmap;
adau->switch_mode = switch_mode;
adau->type = type;
}
EXPORT_SYMBOL_GPL(adau17x1_probe);
+void adau17x1_remove(struct device *dev)
+{
+ struct adau *adau = dev_get_drvdata(dev);
+
+ snd_soc_unregister_codec(dev);
+ if (adau->mclk)
+ clk_disable_unprepare(adau->mclk);
+}
+EXPORT_SYMBOL_GPL(adau17x1_remove);
+
MODULE_DESCRIPTION("ASoC ADAU1X61/ADAU1X81 common code");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_LICENSE("GPL");
};
enum adau17x1_clk_src {
+ /* Automatically configure PLL based on the sample rate */
+ ADAU17X1_CLK_SRC_PLL_AUTO,
ADAU17X1_CLK_SRC_MCLK,
ADAU17X1_CLK_SRC_PLL,
};
+struct clk;
+
struct adau {
unsigned int sysclk;
unsigned int pll_freq;
+ struct clk *mclk;
enum adau17x1_clk_src clk_src;
enum adau17x1_type type;
int adau17x1_probe(struct device *dev, struct regmap *regmap,
enum adau17x1_type type, void (*switch_mode)(struct device *dev),
const char *firmware_name);
+void adau17x1_remove(struct device *dev);
int adau17x1_set_micbias_voltage(struct snd_soc_codec *codec,
enum adau17x1_micbias_voltage micbias);
bool adau17x1_readable_register(struct device *dev, unsigned int reg);
--- /dev/null
+/*
+ * ADAU7002 Stereo PDM-to-I2S/TDM converter driver
+ *
+ * Copyright 2014-2016 Analog Devices
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <sound/soc.h>
+
+static const struct snd_soc_dapm_widget adau7002_widgets[] = {
+ SND_SOC_DAPM_INPUT("PDM_DAT"),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("IOVDD", 0, 0),
+};
+
+static const struct snd_soc_dapm_route adau7002_routes[] = {
+ { "Capture", NULL, "PDM_DAT" },
+ { "Capture", NULL, "IOVDD" },
+};
+
+static struct snd_soc_dai_driver adau7002_dai = {
+ .name = "adau7002-hifi",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE |
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE,
+ .sig_bits = 20,
+ },
+};
+
+static const struct snd_soc_codec_driver adau7002_codec_driver = {
+ .dapm_widgets = adau7002_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adau7002_widgets),
+ .dapm_routes = adau7002_routes,
+ .num_dapm_routes = ARRAY_SIZE(adau7002_routes),
+};
+
+static int adau7002_probe(struct platform_device *pdev)
+{
+ return snd_soc_register_codec(&pdev->dev, &adau7002_codec_driver,
+ &adau7002_dai, 1);
+}
+
+static int adau7002_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id adau7002_dt_ids[] = {
+ { .compatible = "adi,adau7002", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adau7002_dt_ids);
+#endif
+
+static struct platform_driver adau7002_driver = {
+ .driver = {
+ .name = "adau7002",
+ .of_match_table = of_match_ptr(adau7002_dt_ids),
+ },
+ .probe = adau7002_probe,
+ .remove = adau7002_remove,
+};
+module_platform_driver(adau7002_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("ADAU7002 Stereo PDM-to-I2S/TDM Converter driver");
+MODULE_LICENSE("GPL v2");
.max_register = 0x16,
.reg_defaults = ak4613_reg,
.num_reg_defaults = ARRAY_SIZE(ak4613_reg),
+ .cache_type = REGCACHE_RBTREE,
};
static const struct of_device_id ak4613_of_match[] = {
.symmetric_rates = 1,
};
-static int ak4613_resume(struct snd_soc_codec *codec)
+static int ak4613_suspend(struct snd_soc_codec *codec)
{
struct regmap *regmap = dev_get_regmap(codec->dev, NULL);
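+ /* Route register writes to the cache while suspended; resume syncs them back */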
+ regcache_cache_only(regmap, true);
regcache_mark_dirty(regmap);
+ return 0;
+}
+
+static int ak4613_resume(struct snd_soc_codec *codec)
+{
+ struct regmap *regmap = dev_get_regmap(codec->dev, NULL);
+
+ regcache_cache_only(regmap, false);
return regcache_sync(regmap);
}
static struct snd_soc_codec_driver soc_codec_dev_ak4613 = {
+ .suspend = ak4613_suspend,
.resume = ak4613_resume,
.set_bias_level = ak4613_set_bias_level,
.controls = ak4613_snd_controls,
static struct i2c_driver ak4613_i2c_driver = {
.driver = {
.name = "ak4613-codec",
- .owner = THIS_MODULE,
.of_match_table = ak4613_of_match,
},
.probe = ak4613_i2c_probe,
.symmetric_rates = 1,
};
-static int ak4642_resume(struct snd_soc_codec *codec)
+static int ak4642_suspend(struct snd_soc_codec *codec)
{
struct regmap *regmap = dev_get_regmap(codec->dev, NULL);
+ regcache_cache_only(regmap, true);
regcache_mark_dirty(regmap);
- regcache_sync(regmap);
return 0;
}
+static int ak4642_resume(struct snd_soc_codec *codec)
+{
+ struct regmap *regmap = dev_get_regmap(codec->dev, NULL);
+
+ regcache_cache_only(regmap, false);
+ regcache_sync(regmap);
+ return 0;
+}
static int ak4642_probe(struct snd_soc_codec *codec)
{
struct ak4642_priv *priv = snd_soc_codec_get_drvdata(codec);
static struct snd_soc_codec_driver soc_codec_dev_ak4642 = {
.probe = ak4642_probe,
+ .suspend = ak4642_suspend,
.resume = ak4642_resume,
.set_bias_level = ak4642_set_bias_level,
.controls = ak4642_snd_controls,
{
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
- struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
- bool manual_ena = false;
int val;
- switch (arizona->type) {
- case WM5102:
- switch (arizona->rev) {
- case 0:
- break;
- default:
- manual_ena = true;
- break;
- }
- default:
- break;
- }
-
switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- if (!priv->spk_ena && manual_ena) {
- regmap_write_async(arizona->regmap, 0x4f5, 0x25a);
- priv->spk_ena_pending = true;
- }
- break;
case SND_SOC_DAPM_POST_PMU:
val = snd_soc_read(codec, ARIZONA_INTERRUPT_RAW_STATUS_3);
if (val & ARIZONA_SPK_OVERHEAT_STS) {
regmap_update_bits_async(arizona->regmap,
ARIZONA_OUTPUT_ENABLES_1,
1 << w->shift, 1 << w->shift);
-
- if (priv->spk_ena_pending) {
- msleep(75);
- regmap_write_async(arizona->regmap, 0x4f5, 0xda);
- priv->spk_ena_pending = false;
- priv->spk_ena++;
- }
break;
case SND_SOC_DAPM_PRE_PMD:
- if (manual_ena) {
- priv->spk_ena--;
- if (!priv->spk_ena)
- regmap_write_async(arizona->regmap,
- 0x4f5, 0x25a);
- }
-
regmap_update_bits_async(arizona->regmap,
ARIZONA_OUTPUT_ENABLES_1,
1 << w->shift, 0);
break;
- case SND_SOC_DAPM_POST_PMD:
- if (manual_ena) {
- if (!priv->spk_ena)
- regmap_write_async(arizona->regmap,
- 0x4f5, 0x0da);
- }
- break;
default:
break;
}
}
EXPORT_SYMBOL_GPL(arizona_init_gpio);
+int arizona_init_notifiers(struct snd_soc_codec *codec)
+{
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&arizona->notifier);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_init_notifiers);
+
const char * const arizona_mixer_texts[ARIZONA_NUM_MIXER_INPUTS] = {
"None",
"Tone Generator 1",
arizona_rate_text, arizona_rate_val);
EXPORT_SYMBOL_GPL(arizona_asrc_rate1);
-static const char *arizona_vol_ramp_text[] = {
+static const char * const arizona_vol_ramp_text[] = {
"0ms/6dB", "0.5ms/6dB", "1ms/6dB", "2ms/6dB", "4ms/6dB", "8ms/6dB",
"15ms/6dB", "30ms/6dB",
};
arizona_vol_ramp_text);
EXPORT_SYMBOL_GPL(arizona_out_vi_ramp);
-static const char *arizona_lhpf_mode_text[] = {
+static const char * const arizona_lhpf_mode_text[] = {
"Low-pass", "High-pass"
};
arizona_lhpf_mode_text);
EXPORT_SYMBOL_GPL(arizona_lhpf4_mode);
-static const char *arizona_ng_hold_text[] = {
+static const char * const arizona_ng_hold_text[] = {
"30ms", "120ms", "250ms", "500ms",
};
};
EXPORT_SYMBOL_GPL(arizona_output_anc_src);
+const struct snd_kcontrol_new arizona_voice_trigger_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0),
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 1, 1, 0),
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 2, 1, 0),
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 3, 1, 0),
+};
+EXPORT_SYMBOL_GPL(arizona_voice_trigger_switch);
+
static void arizona_in_set_vu(struct snd_soc_codec *codec, int ena)
{
struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
}
EXPORT_SYMBOL_GPL(arizona_lhpf_coeff_put);
+int arizona_register_notifier(struct snd_soc_codec *codec,
+ struct notifier_block *nb,
+ int (*notify)(struct notifier_block *nb,
+ unsigned long action, void *data))
+{
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
+
+ nb->notifier_call = notify;
+
+ return blocking_notifier_chain_register(&arizona->notifier, nb);
+}
+EXPORT_SYMBOL_GPL(arizona_register_notifier);
+
+int arizona_unregister_notifier(struct snd_soc_codec *codec,
+ struct notifier_block *nb)
+{
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
+
+ return blocking_notifier_chain_unregister(&arizona->notifier, nb);
+}
+EXPORT_SYMBOL_GPL(arizona_unregister_notifier);
+
MODULE_DESCRIPTION("ASoC Wolfson Arizona class device support");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
#define ARIZONA_DVFS_SR1_RQ 0x001
#define ARIZONA_DVFS_ADSP1_RQ 0x100
+/* Notifier events */
+#define ARIZONA_NOTIFY_VOICE_TRIGGER 0x1
+
struct arizona;
struct wm_adsp;
unsigned int out_down_pending;
unsigned int out_down_delay;
- unsigned int spk_ena:2;
- unsigned int spk_ena_pending:1;
-
unsigned int dvfs_reqs;
struct mutex dvfs_lock;
bool dvfs_cached;
};
+struct arizona_voice_trigger_info {
+ int core;
+};
+
#define ARIZONA_NUM_MIXER_INPUTS 104
extern const unsigned int arizona_mixer_tlv[];
extern const struct soc_enum arizona_anc_ng_enum;
extern const struct soc_enum arizona_output_anc_src[];
+extern const struct snd_kcontrol_new arizona_voice_trigger_switch[];
+
extern int arizona_in_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event);
extern int arizona_init_spk(struct snd_soc_codec *codec);
extern int arizona_init_gpio(struct snd_soc_codec *codec);
extern int arizona_init_mono(struct snd_soc_codec *codec);
+extern int arizona_init_notifiers(struct snd_soc_codec *codec);
extern int arizona_free_spk(struct snd_soc_codec *codec);
extern bool arizona_input_analog(struct snd_soc_codec *codec, int shift);
extern const char *arizona_sample_rate_val_to_name(unsigned int rate_val);
+
+extern int arizona_register_notifier(struct snd_soc_codec *codec,
+ struct notifier_block *nb,
+ int (*notify)(struct notifier_block *nb,
+ unsigned long action,
+ void *data));
+extern int arizona_unregister_notifier(struct snd_soc_codec *codec,
+ struct notifier_block *nb);
+
#endif
{ "TX", NULL, "Playback" },
};
-static struct snd_soc_dai_driver bt_sco_dai = {
- .name = "bt-sco-pcm",
- .playback = {
- .stream_name = "Playback",
- .channels_min = 1,
- .channels_max = 1,
- .rates = SNDRV_PCM_RATE_8000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- .capture = {
- .stream_name = "Capture",
- .channels_min = 1,
- .channels_max = 1,
- .rates = SNDRV_PCM_RATE_8000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+static struct snd_soc_dai_driver bt_sco_dai[] = {
+ {
+ .name = "bt-sco-pcm",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
},
+ {
+ .name = "bt-sco-pcm-wb",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ }
};
static struct snd_soc_codec_driver soc_codec_dev_bt_sco = {
static int bt_sco_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_bt_sco,
- &bt_sco_dai, 1);
+ bt_sco_dai, ARRAY_SIZE(bt_sco_dai));
}
static int bt_sco_remove(struct platform_device *pdev)
#if defined(CONFIG_OF)
static const struct of_device_id bt_sco_codec_of_match[] = {
{ .compatible = "delta,dfbmcs320", },
+ { .compatible = "linux,bt-sco", },
{},
};
MODULE_DEVICE_TABLE(of, bt_sco_codec_of_match);
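With two DAIs now registered, a machine driver selects the wideband PCM by DAI
name. A minimal dai_link sketch, assuming a hypothetical card; only the
"bt-sco-pcm-wb" name comes from this change:

	static struct snd_soc_dai_link bt_wb_link = {	/* hypothetical link */
		.name		= "BT SCO WB",
		.stream_name	= "BT Wideband",
		.codec_dai_name	= "bt-sco-pcm-wb",	/* 8 kHz and 16 kHz */
	};
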
--- /dev/null
+/*
+ * cs35l33.c -- CS35L33 ALSA SoC audio driver
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: Paul Handrigan <paul.handrigan@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <sound/cs35l33.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/machine.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+
+#include "cs35l33.h"
+
+#define CS35L33_BOOT_DELAY 50
+
+struct cs35l33_private {
+ struct snd_soc_codec *codec;
+ struct cs35l33_pdata pdata;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ bool amp_cal;
+ int mclk_int;
+ struct regulator_bulk_data core_supplies[2];
+ int num_core_supplies;
+ bool is_tdm_mode;
+ bool enable_soft_ramp;
+};
+
+static const struct reg_default cs35l33_reg[] = {
+ {CS35L33_PWRCTL1, 0x85},
+ {CS35L33_PWRCTL2, 0xFE},
+ {CS35L33_CLK_CTL, 0x0C},
+ {CS35L33_BST_PEAK_CTL, 0x90},
+ {CS35L33_PROTECT_CTL, 0x55},
+ {CS35L33_BST_CTL1, 0x00},
+ {CS35L33_BST_CTL2, 0x01},
+ {CS35L33_ADSP_CTL, 0x00},
+ {CS35L33_ADC_CTL, 0xC8},
+ {CS35L33_DAC_CTL, 0x14},
+ {CS35L33_DIG_VOL_CTL, 0x00},
+ {CS35L33_CLASSD_CTL, 0x04},
+ {CS35L33_AMP_CTL, 0x90},
+ {CS35L33_INT_MASK_1, 0xFF},
+ {CS35L33_INT_MASK_2, 0xFF},
+ {CS35L33_DIAG_LOCK, 0x00},
+ {CS35L33_DIAG_CTRL_1, 0x40},
+ {CS35L33_DIAG_CTRL_2, 0x00},
+ {CS35L33_HG_MEMLDO_CTL, 0x62},
+ {CS35L33_HG_REL_RATE, 0x03},
+ {CS35L33_LDO_DEL, 0x12},
+ {CS35L33_HG_HEAD, 0x0A},
+ {CS35L33_HG_EN, 0x05},
+ {CS35L33_TX_VMON, 0x00},
+ {CS35L33_TX_IMON, 0x03},
+ {CS35L33_TX_VPMON, 0x02},
+ {CS35L33_TX_VBSTMON, 0x05},
+ {CS35L33_TX_FLAG, 0x06},
+ {CS35L33_TX_EN1, 0x00},
+ {CS35L33_TX_EN2, 0x00},
+ {CS35L33_TX_EN3, 0x00},
+ {CS35L33_TX_EN4, 0x00},
+ {CS35L33_RX_AUD, 0x40},
+ {CS35L33_RX_SPLY, 0x03},
+ {CS35L33_RX_ALIVE, 0x04},
+ {CS35L33_BST_CTL4, 0x63},
+};
+
+static const struct reg_sequence cs35l33_patch[] = {
+ { 0x00, 0x99, 0 },
+ { 0x59, 0x02, 0 },
+ { 0x52, 0x30, 0 },
+ { 0x39, 0x45, 0 },
+ { 0x57, 0x30, 0 },
+ { 0x2C, 0x68, 0 },
+ { 0x00, 0x00, 0 },
+};
+
+static bool cs35l33_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS35L33_DEVID_AB:
+ case CS35L33_DEVID_CD:
+ case CS35L33_DEVID_E:
+ case CS35L33_REV_ID:
+ case CS35L33_INT_STATUS_1:
+ case CS35L33_INT_STATUS_2:
+ case CS35L33_HG_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cs35l33_writeable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ /* these are read only registers */
+ case CS35L33_DEVID_AB:
+ case CS35L33_DEVID_CD:
+ case CS35L33_DEVID_E:
+ case CS35L33_REV_ID:
+ case CS35L33_INT_STATUS_1:
+ case CS35L33_INT_STATUS_2:
+ case CS35L33_HG_STATUS:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool cs35l33_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS35L33_DEVID_AB:
+ case CS35L33_DEVID_CD:
+ case CS35L33_DEVID_E:
+ case CS35L33_REV_ID:
+ case CS35L33_PWRCTL1:
+ case CS35L33_PWRCTL2:
+ case CS35L33_CLK_CTL:
+ case CS35L33_BST_PEAK_CTL:
+ case CS35L33_PROTECT_CTL:
+ case CS35L33_BST_CTL1:
+ case CS35L33_BST_CTL2:
+ case CS35L33_ADSP_CTL:
+ case CS35L33_ADC_CTL:
+ case CS35L33_DAC_CTL:
+ case CS35L33_DIG_VOL_CTL:
+ case CS35L33_CLASSD_CTL:
+ case CS35L33_AMP_CTL:
+ case CS35L33_INT_MASK_1:
+ case CS35L33_INT_MASK_2:
+ case CS35L33_INT_STATUS_1:
+ case CS35L33_INT_STATUS_2:
+ case CS35L33_DIAG_LOCK:
+ case CS35L33_DIAG_CTRL_1:
+ case CS35L33_DIAG_CTRL_2:
+ case CS35L33_HG_MEMLDO_CTL:
+ case CS35L33_HG_REL_RATE:
+ case CS35L33_LDO_DEL:
+ case CS35L33_HG_HEAD:
+ case CS35L33_HG_EN:
+ case CS35L33_TX_VMON:
+ case CS35L33_TX_IMON:
+ case CS35L33_TX_VPMON:
+ case CS35L33_TX_VBSTMON:
+ case CS35L33_TX_FLAG:
+ case CS35L33_TX_EN1:
+ case CS35L33_TX_EN2:
+ case CS35L33_TX_EN3:
+ case CS35L33_TX_EN4:
+ case CS35L33_RX_AUD:
+ case CS35L33_RX_SPLY:
+ case CS35L33_RX_ALIVE:
+ case CS35L33_BST_CTL4:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static DECLARE_TLV_DB_SCALE(classd_ctl_tlv, 900, 100, 0);
+static DECLARE_TLV_DB_SCALE(dac_tlv, -10200, 50, 0);
+
+static const struct snd_kcontrol_new cs35l33_snd_controls[] = {
+
+ SOC_SINGLE_TLV("SPK Amp Volume", CS35L33_AMP_CTL,
+ 4, 0x09, 0, classd_ctl_tlv),
+ SOC_SINGLE_SX_TLV("DAC Volume", CS35L33_DIG_VOL_CTL,
+ 0, 0x34, 0xE4, dac_tlv),
+};
+
+static int cs35l33_spkrdrv_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (!priv->amp_cal) {
+ usleep_range(8000, 9000);
+ priv->amp_cal = true;
+ regmap_update_bits(priv->regmap, CS35L33_CLASSD_CTL,
+ CS35L33_AMP_CAL, 0);
+ dev_dbg(codec->dev, "Amp calibration done\n");
+ }
+ dev_dbg(codec->dev, "Amp turned on\n");
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ dev_dbg(codec->dev, "Amp turned off\n");
+ break;
+ default:
+ dev_err(codec->dev, "Invalid event = 0x%x\n", event);
+ break;
+ }
+
+ return 0;
+}
+
+static int cs35l33_sdin_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+ unsigned int val;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL1,
+ CS35L33_PDN_BST, 0);
+ val = priv->is_tdm_mode ? 0 : CS35L33_PDN_TDM;
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL2,
+ CS35L33_PDN_TDM, val);
+ dev_dbg(codec->dev, "BST turned on\n");
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ dev_dbg(codec->dev, "SDIN turned on\n");
+ if (!priv->amp_cal) {
+ regmap_update_bits(priv->regmap, CS35L33_CLASSD_CTL,
+ CS35L33_AMP_CAL, CS35L33_AMP_CAL);
+ dev_dbg(codec->dev, "Amp calibration started\n");
+ usleep_range(10000, 11000);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL2,
+ CS35L33_PDN_TDM, CS35L33_PDN_TDM);
+ usleep_range(4000, 4100);
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL1,
+ CS35L33_PDN_BST, CS35L33_PDN_BST);
+ dev_dbg(codec->dev, "BST and SDIN turned off\n");
+ break;
+ default:
+ dev_err(codec->dev, "Invalid event = 0x%x\n", event);
+ break;
+ }
+
+ return 0;
+}
+
+static int cs35l33_sdout_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+ unsigned int mask = CS35L33_SDOUT_3ST_I2S | CS35L33_PDN_TDM;
+ unsigned int mask2 = CS35L33_SDOUT_3ST_TDM;
+ unsigned int val, val2;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (priv->is_tdm_mode) {
+ /* set sdout_3st_i2s and reset pdn_tdm */
+ val = CS35L33_SDOUT_3ST_I2S;
+ /* reset sdout_3st_tdm */
+ val2 = 0;
+ } else {
+ /* reset sdout_3st_i2s and set pdn_tdm */
+ val = CS35L33_PDN_TDM;
+ /* set sdout_3st_tdm */
+ val2 = CS35L33_SDOUT_3ST_TDM;
+ }
+ dev_dbg(codec->dev, "SDOUT turned on\n");
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ val = CS35L33_SDOUT_3ST_I2S | CS35L33_PDN_TDM;
+ val2 = CS35L33_SDOUT_3ST_TDM;
+ dev_dbg(codec->dev, "SDOUT turned off\n");
+ break;
+ default:
+ dev_err(codec->dev, "Invalid event = 0x%x\n", event);
+ return 0;
+ }
+
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL2,
+ mask, val);
+ regmap_update_bits(priv->regmap, CS35L33_CLK_CTL,
+ mask2, val2);
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget cs35l33_dapm_widgets[] = {
+
+ SND_SOC_DAPM_OUTPUT("SPK"),
+ SND_SOC_DAPM_OUT_DRV_E("SPKDRV", CS35L33_PWRCTL1, 7, 1, NULL, 0,
+ cs35l33_spkrdrv_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_AIF_IN_E("SDIN", NULL, 0, CS35L33_PWRCTL2,
+ 2, 1, cs35l33_sdin_event, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_INPUT("MON"),
+
+ SND_SOC_DAPM_ADC("VMON", NULL,
+ CS35L33_PWRCTL2, CS35L33_PDN_VMON_SHIFT, 1),
+ SND_SOC_DAPM_ADC("IMON", NULL,
+ CS35L33_PWRCTL2, CS35L33_PDN_IMON_SHIFT, 1),
+ SND_SOC_DAPM_ADC("VPMON", NULL,
+ CS35L33_PWRCTL2, CS35L33_PDN_VPMON_SHIFT, 1),
+ SND_SOC_DAPM_ADC("VBSTMON", NULL,
+ CS35L33_PWRCTL2, CS35L33_PDN_VBSTMON_SHIFT, 1),
+
+ SND_SOC_DAPM_AIF_OUT_E("SDOUT", NULL, 0, SND_SOC_NOPM, 0, 0,
+ cs35l33_sdout_event, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_PRE_PMD),
+};
+
+static const struct snd_soc_dapm_route cs35l33_audio_map[] = {
+ {"SDIN", NULL, "CS35L33 Playback"},
+ {"SPKDRV", NULL, "SDIN"},
+ {"SPK", NULL, "SPKDRV"},
+
+ {"VMON", NULL, "MON"},
+ {"IMON", NULL, "MON"},
+
+ {"SDOUT", NULL, "VMON"},
+ {"SDOUT", NULL, "IMON"},
+ {"CS35L33 Capture", NULL, "SDOUT"},
+};
+
+static const struct snd_soc_dapm_route cs35l33_vphg_auto_route[] = {
+ {"SPKDRV", NULL, "VPMON"},
+ {"VPMON", NULL, "CS35L33 Playback"},
+};
+
+static const struct snd_soc_dapm_route cs35l33_vp_vbst_mon_route[] = {
+ {"SDOUT", NULL, "VPMON"},
+ {"VPMON", NULL, "MON"},
+ {"SDOUT", NULL, "VBSTMON"},
+ {"VBSTMON", NULL, "MON"},
+};
+
+static int cs35l33_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ unsigned int val;
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL1,
+ CS35L33_PDN_ALL, 0);
+ regmap_update_bits(priv->regmap, CS35L33_CLK_CTL,
+ CS35L33_MCLKDIS, 0);
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL1,
+ CS35L33_PDN_ALL, CS35L33_PDN_ALL);
+ regmap_read(priv->regmap, CS35L33_INT_STATUS_2, &val);
+ usleep_range(1000, 1100);
+ if (val & CS35L33_PDN_DONE)
+ regmap_update_bits(priv->regmap, CS35L33_CLK_CTL,
+ CS35L33_MCLKDIS, CS35L33_MCLKDIS);
+ break;
+ case SND_SOC_BIAS_OFF:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct cs35l33_mclk_div {
+ int mclk;
+ int srate;
+ u8 adsp_rate;
+ u8 int_fs_ratio;
+};
+
+static const struct cs35l33_mclk_div cs35l33_mclk_coeffs[] = {
+ /* MCLK, Sample Rate, adsp_rate, int_fs_ratio */
+ {5644800, 11025, 0x4, CS35L33_INT_FS_RATE},
+ {5644800, 22050, 0x8, CS35L33_INT_FS_RATE},
+ {5644800, 44100, 0xC, CS35L33_INT_FS_RATE},
+
+ {6000000, 8000, 0x1, 0},
+ {6000000, 11025, 0x2, 0},
+ {6000000, 11029, 0x3, 0},
+ {6000000, 12000, 0x4, 0},
+ {6000000, 16000, 0x5, 0},
+ {6000000, 22050, 0x6, 0},
+ {6000000, 22059, 0x7, 0},
+ {6000000, 24000, 0x8, 0},
+ {6000000, 32000, 0x9, 0},
+ {6000000, 44100, 0xA, 0},
+ {6000000, 44118, 0xB, 0},
+ {6000000, 48000, 0xC, 0},
+
+ {6144000, 8000, 0x1, CS35L33_INT_FS_RATE},
+ {6144000, 12000, 0x4, CS35L33_INT_FS_RATE},
+ {6144000, 16000, 0x5, CS35L33_INT_FS_RATE},
+ {6144000, 24000, 0x8, CS35L33_INT_FS_RATE},
+ {6144000, 32000, 0x9, CS35L33_INT_FS_RATE},
+ {6144000, 48000, 0xC, CS35L33_INT_FS_RATE},
+};
+
+static int cs35l33_get_mclk_coeff(int mclk, int srate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs35l33_mclk_coeffs); i++) {
+ if (cs35l33_mclk_coeffs[i].mclk == mclk &&
+ cs35l33_mclk_coeffs[i].srate == srate)
+ return i;
+ }
+ return -EINVAL;
+}
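A worked lookup against the table above: with the internal MCLK at 6.144 MHz
and a 48 kHz stream, cs35l33_get_mclk_coeff(6144000, 48000) matches the last
entry, so hw_params programs adsp_rate 0xC and sets CS35L33_INT_FS_RATE; an
unlisted pairing such as (6144000, 44100) returns -EINVAL and the hw_params
call fails.
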
+
+static int cs35l33_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ regmap_update_bits(priv->regmap, CS35L33_ADSP_CTL,
+ CS35L33_MS_MASK, CS35L33_MS_MASK);
+ dev_dbg(codec->dev, "Audio port in master mode\n");
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ regmap_update_bits(priv->regmap, CS35L33_ADSP_CTL,
+ CS35L33_MS_MASK, 0);
+ dev_dbg(codec->dev, "Audio port in slave mode\n");
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ /*
+ * TDM mode in the cs35l33 resembles DSP-A mode very
+ * closely; it is DSP-A with fsync shifted left by half a bclk
+ */
+ priv->is_tdm_mode = true;
+ dev_dbg(codec->dev, "Audio port in TDM mode\n");
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ priv->is_tdm_mode = false;
+ dev_dbg(codec->dev, "Audio port in I2S mode\n");
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cs35l33_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+ int sample_size = params_width(params);
+ int coeff = cs35l33_get_mclk_coeff(priv->mclk_int, params_rate(params));
+
+ if (coeff < 0)
+ return coeff;
+
+ regmap_update_bits(priv->regmap, CS35L33_CLK_CTL,
+ CS35L33_ADSP_FS | CS35L33_INT_FS_RATE,
+ cs35l33_mclk_coeffs[coeff].int_fs_ratio
+ | cs35l33_mclk_coeffs[coeff].adsp_rate);
+
+ if (priv->is_tdm_mode) {
+ sample_size = (sample_size / 8) - 1;
+ if (sample_size > 2)
+ sample_size = 2;
+ regmap_update_bits(priv->regmap, CS35L33_RX_AUD,
+ CS35L33_AUDIN_RX_DEPTH,
+ sample_size << CS35L33_AUDIN_RX_DEPTH_SHIFT);
+ }
+
+ dev_dbg(codec->dev, "sample rate=%d, bits per sample=%d\n",
+ params_rate(params), params_width(params));
+
+ return 0;
+}
+
+static const unsigned int cs35l33_src_rates[] = {
+ 8000, 11025, 11029, 12000, 16000, 22050,
+ 22059, 24000, 32000, 44100, 44118, 48000
+};
+
+static const struct snd_pcm_hw_constraint_list cs35l33_constraints = {
+ .count = ARRAY_SIZE(cs35l33_src_rates),
+ .list = cs35l33_src_rates,
+};
+
+static int cs35l33_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &cs35l33_constraints);
+ return 0;
+}
+
+static int cs35l33_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ if (tristate) {
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL2,
+ CS35L33_SDOUT_3ST_I2S, CS35L33_SDOUT_3ST_I2S);
+ regmap_update_bits(priv->regmap, CS35L33_CLK_CTL,
+ CS35L33_SDOUT_3ST_TDM, CS35L33_SDOUT_3ST_TDM);
+ } else {
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL2,
+ CS35L33_SDOUT_3ST_I2S, 0);
+ regmap_update_bits(priv->regmap, CS35L33_CLK_CTL,
+ CS35L33_SDOUT_3ST_TDM, 0);
+ }
+
+ return 0;
+}
+
+static int cs35l33_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg, bit_pos, i;
+ int slot, slot_num;
+
+ if (slot_width != 8)
+ return -EINVAL;
+
+ /* scan rx_mask for aud slot */
+ slot = ffs(rx_mask) - 1;
+ if (slot >= 0) {
+ regmap_update_bits(priv->regmap, CS35L33_RX_AUD,
+ CS35L33_X_LOC, slot);
+ dev_dbg(codec->dev, "Audio starts from slots %d", slot);
+ }
+
+ /*
+ * scan tx_mask: vmon(2 slots); imon (2 slots);
+ * vpmon (1 slot) vbstmon (1 slot)
+ */
+ slot = ffs(tx_mask) - 1;
+ slot_num = 0;
+
+ for (i = 0; i < 2; i++) {
+ /* disable vpmon/vbstmon: enable later if set in tx_mask */
+ regmap_update_bits(priv->regmap, CS35L33_TX_VPMON + i,
+ CS35L33_X_STATE | CS35L33_X_LOC, CS35L33_X_STATE
+ | CS35L33_X_LOC);
+ }
+
+ /* disconnect {vp,vbst}_mon routes: enable later if set in tx_mask */
+ snd_soc_dapm_del_routes(dapm, cs35l33_vp_vbst_mon_route,
+ ARRAY_SIZE(cs35l33_vp_vbst_mon_route));
+
+ while (slot >= 0) {
+ /* configure VMON_TX_LOC */
+ if (slot_num == 0) {
+ regmap_update_bits(priv->regmap, CS35L33_TX_VMON,
+ CS35L33_X_STATE | CS35L33_X_LOC, slot);
+ dev_dbg(codec->dev, "VMON enabled in slots %d-%d",
+ slot, slot + 1);
+ }
+
+ /* configure IMON_TX_LOC */
+ if (slot_num == 3) {
+ regmap_update_bits(priv->regmap, CS35L33_TX_IMON,
+ CS35L33_X_STATE | CS35L33_X_LOC, slot);
+ dev_dbg(codec->dev, "IMON enabled in slots %d-%d",
+ slot, slot + 1);
+ }
+
+ /* configure VPMON_TX_LOC */
+ if (slot_num == 4) {
+ regmap_update_bits(priv->regmap, CS35L33_TX_VPMON,
+ CS35L33_X_STATE | CS35L33_X_LOC, slot);
+ snd_soc_dapm_add_routes(dapm,
+ &cs35l33_vp_vbst_mon_route[0], 2);
+ dev_dbg(codec->dev, "VPMON enabled in slots %d", slot);
+ }
+
+ /* configure VBSTMON_TX_LOC */
+ if (slot_num == 5) {
+ regmap_update_bits(priv->regmap, CS35L33_TX_VBSTMON,
+ CS35L33_X_STATE | CS35L33_X_LOC, slot);
+ snd_soc_dapm_add_routes(dapm,
+ &cs35l33_vp_vbst_mon_route[2], 2);
+ dev_dbg(codec->dev,
+ "VBSTMON enabled in slot %d\n", slot);
+ }
+
+ /* Enable the relevant tx slot */
+ reg = CS35L33_TX_EN4 - (slot / 8);
+ bit_pos = slot - ((slot / 8) * 8);
+ regmap_update_bits(priv->regmap, reg,
+ 1 << bit_pos, 1 << bit_pos);
+
+ tx_mask &= ~(1 << slot);
+ slot = ffs(tx_mask) - 1;
+ slot_num++;
+ }
+
+ return 0;
+}
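A sketch of the machine-driver call shape for the TDM hook above, with
illustrative masks: audio lands in RX slot 0, and the six TX monitor slots
follow the scan order in the function (VMON and IMON take two 8-bit slots
each, VPMON and VBSTMON one each):

	/* tx: monitor slots 0..5; rx: audio in slot 0; 16 slots of 8 bits */
	ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3f, 0x01, 16, 8);
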
+
+static int cs35l33_codec_set_sysclk(struct snd_soc_codec *codec,
+ int clk_id, int source, unsigned int freq, int dir)
+{
+ struct cs35l33_private *cs35l33 = snd_soc_codec_get_drvdata(codec);
+
+ switch (freq) {
+ case CS35L33_MCLK_5644:
+ case CS35L33_MCLK_6:
+ case CS35L33_MCLK_6144:
+ regmap_update_bits(cs35l33->regmap, CS35L33_CLK_CTL,
+ CS35L33_MCLKDIV2, 0);
+ cs35l33->mclk_int = freq;
+ break;
+ case CS35L33_MCLK_11289:
+ case CS35L33_MCLK_12:
+ case CS35L33_MCLK_12288:
+ regmap_update_bits(cs35l33->regmap, CS35L33_CLK_CTL,
+ CS35L33_MCLKDIV2, CS35L33_MCLKDIV2);
+ cs35l33->mclk_int = freq/2;
+ break;
+ default:
+ cs35l33->mclk_int = 0;
+ return -EINVAL;
+ }
+
+ dev_dbg(codec->dev, "external mclk freq=%d, internal mclk freq=%d\n",
+ freq, cs35l33->mclk_int);
+
+ return 0;
+}
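A sketch of driving the sysclk hook above, assuming a 12.288 MHz master clock:
the code sets CS35L33_MCLKDIV2, so the part runs from 6.144 MHz internally and
the 6.144 MHz rows of the coefficient table apply.

	ret = snd_soc_codec_set_sysclk(codec, 0, 0, CS35L33_MCLK_12288, 0);
	/* cs35l33->mclk_int is now 6144000 */
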
+
+static const struct snd_soc_dai_ops cs35l33_ops = {
+ .startup = cs35l33_pcm_startup,
+ .set_tristate = cs35l33_set_tristate,
+ .set_fmt = cs35l33_set_dai_fmt,
+ .hw_params = cs35l33_pcm_hw_params,
+ .set_tdm_slot = cs35l33_set_tdm_slot,
+};
+
+static struct snd_soc_dai_driver cs35l33_dai = {
+ .name = "cs35l33-dai",
+ .id = 0,
+ .playback = {
+ .stream_name = "CS35L33 Playback",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = CS35L33_RATES,
+ .formats = CS35L33_FORMATS,
+ },
+ .capture = {
+ .stream_name = "CS35L33 Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = CS35L33_RATES,
+ .formats = CS35L33_FORMATS,
+ },
+ .ops = &cs35l33_ops,
+ .symmetric_rates = 1,
+};
+
+static int cs35l33_set_hg_data(struct snd_soc_codec *codec,
+ struct cs35l33_pdata *pdata)
+{
+ struct cs35l33_hg *hg_config = &pdata->hg_config;
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ if (hg_config->enable_hg_algo) {
+ regmap_update_bits(priv->regmap, CS35L33_HG_MEMLDO_CTL,
+ CS35L33_MEM_DEPTH_MASK,
+ hg_config->mem_depth << CS35L33_MEM_DEPTH_SHIFT);
+ regmap_write(priv->regmap, CS35L33_HG_REL_RATE,
+ hg_config->release_rate);
+ regmap_update_bits(priv->regmap, CS35L33_HG_HEAD,
+ CS35L33_HD_RM_MASK,
+ hg_config->hd_rm << CS35L33_HD_RM_SHIFT);
+ regmap_update_bits(priv->regmap, CS35L33_HG_MEMLDO_CTL,
+ CS35L33_LDO_THLD_MASK,
+ hg_config->ldo_thld << CS35L33_LDO_THLD_SHIFT);
+ regmap_update_bits(priv->regmap, CS35L33_HG_MEMLDO_CTL,
+ CS35L33_LDO_DISABLE_MASK,
+ hg_config->ldo_path_disable <<
+ CS35L33_LDO_DISABLE_SHIFT);
+ regmap_update_bits(priv->regmap, CS35L33_LDO_DEL,
+ CS35L33_LDO_ENTRY_DELAY_MASK,
+ hg_config->ldo_entry_delay <<
+ CS35L33_LDO_ENTRY_DELAY_SHIFT);
+ if (hg_config->vp_hg_auto) {
+ regmap_update_bits(priv->regmap, CS35L33_HG_EN,
+ CS35L33_VP_HG_AUTO_MASK,
+ CS35L33_VP_HG_AUTO_MASK);
+ snd_soc_dapm_add_routes(dapm, cs35l33_vphg_auto_route,
+ ARRAY_SIZE(cs35l33_vphg_auto_route));
+ }
+ regmap_update_bits(priv->regmap, CS35L33_HG_EN,
+ CS35L33_VP_HG_MASK,
+ hg_config->vp_hg << CS35L33_VP_HG_SHIFT);
+ regmap_update_bits(priv->regmap, CS35L33_LDO_DEL,
+ CS35L33_VP_HG_RATE_MASK,
+ hg_config->vp_hg_rate << CS35L33_VP_HG_RATE_SHIFT);
+ regmap_update_bits(priv->regmap, CS35L33_LDO_DEL,
+ CS35L33_VP_HG_VA_MASK,
+ hg_config->vp_hg_va << CS35L33_VP_HG_VA_SHIFT);
+ regmap_update_bits(priv->regmap, CS35L33_HG_EN,
+ CS35L33_CLASS_HG_EN_MASK, CS35L33_CLASS_HG_EN_MASK);
+ }
+ return 0;
+}
+
+static int cs35l33_set_bst_ipk(struct snd_soc_codec *codec, unsigned int bst)
+{
+ struct cs35l33_private *cs35l33 = snd_soc_codec_get_drvdata(codec);
+ int ret = 0, steps = 0;
+
+ /* Boost current in uA */
+ if (bst > 3600000 || bst < 1850000) {
+ dev_err(codec->dev, "Invalid boost current %d\n", bst);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (bst % 15625) {
+ dev_err(codec->dev, "Current not a multiple of 15625uA (%d)\n",
+ bst);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ while (bst > 1850000) {
+ bst -= 15625;
+ steps++;
+ }
+
+ regmap_write(cs35l33->regmap, CS35L33_BST_PEAK_CTL,
+ steps + 0x70);
+
+err:
+ return ret;
+}
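A worked pass through the quantization above, values illustrative: bst =
2843750 uA is 182 * 15625, so it passes the modulo test; the loop then takes
64 steps to fall to the 1850000 uA floor, and 0x70 + 64 = 0xB0 is written to
CS35L33_BST_PEAK_CTL. By contrast, bst = 2850000 uA leaves a remainder of
6250 uA and is rejected with -EINVAL.
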
+
+static int cs35l33_probe(struct snd_soc_codec *codec)
+{
+ struct cs35l33_private *cs35l33 = snd_soc_codec_get_drvdata(codec);
+
+ cs35l33->codec = codec;
+ pm_runtime_get_sync(codec->dev);
+
+ regmap_update_bits(cs35l33->regmap, CS35L33_PROTECT_CTL,
+ CS35L33_ALIVE_WD_DIS, 0x8);
+ regmap_update_bits(cs35l33->regmap, CS35L33_BST_CTL2,
+ CS35L33_ALIVE_WD_DIS2,
+ CS35L33_ALIVE_WD_DIS2);
+
+ /* Set Platform Data */
+ regmap_update_bits(cs35l33->regmap, CS35L33_BST_CTL1,
+ CS35L33_BST_CTL_MASK, cs35l33->pdata.boost_ctl);
+ regmap_update_bits(cs35l33->regmap, CS35L33_CLASSD_CTL,
+ CS35L33_AMP_DRV_SEL_MASK,
+ cs35l33->pdata.amp_drv_sel << CS35L33_AMP_DRV_SEL_SHIFT);
+
+ if (cs35l33->pdata.boost_ipk)
+ cs35l33_set_bst_ipk(codec, cs35l33->pdata.boost_ipk);
+
+ if (cs35l33->enable_soft_ramp) {
+ snd_soc_update_bits(codec, CS35L33_DAC_CTL,
+ CS35L33_DIGSFT, CS35L33_DIGSFT);
+ snd_soc_update_bits(codec, CS35L33_DAC_CTL,
+ CS35L33_DSR_RATE, cs35l33->pdata.ramp_rate);
+ } else {
+ snd_soc_update_bits(codec, CS35L33_DAC_CTL,
+ CS35L33_DIGSFT, 0);
+ }
+
+ /* update IMON scaling rate if different from default of 0x8 */
+ if (cs35l33->pdata.imon_adc_scale != 0x8)
+ snd_soc_update_bits(codec, CS35L33_ADC_CTL,
+ CS35L33_IMON_SCALE, cs35l33->pdata.imon_adc_scale);
+
+ cs35l33_set_hg_data(codec, &(cs35l33->pdata));
+
+ /*
+ * unmask important interrupts that cause the chip to enter
+ * speaker safe mode and hence deserve user attention
+ */
+ regmap_update_bits(cs35l33->regmap, CS35L33_INT_MASK_1,
+ CS35L33_M_OTE | CS35L33_M_OTW | CS35L33_M_AMP_SHORT |
+ CS35L33_M_CAL_ERR, 0);
+
+ pm_runtime_put_sync(codec->dev);
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_cs35l33 = {
+ .probe = cs35l33_probe,
+
+ .set_bias_level = cs35l33_set_bias_level,
+ .set_sysclk = cs35l33_codec_set_sysclk,
+
+ .dapm_widgets = cs35l33_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs35l33_dapm_widgets),
+ .dapm_routes = cs35l33_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cs35l33_audio_map),
+ .controls = cs35l33_snd_controls,
+ .num_controls = ARRAY_SIZE(cs35l33_snd_controls),
+
+ .idle_bias_off = true,
+};
+
+static const struct regmap_config cs35l33_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = CS35L33_MAX_REGISTER,
+ .reg_defaults = cs35l33_reg,
+ .num_reg_defaults = ARRAY_SIZE(cs35l33_reg),
+ .volatile_reg = cs35l33_volatile_register,
+ .readable_reg = cs35l33_readable_register,
+ .writeable_reg = cs35l33_writeable_register,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_rw = true,
+};
+
+static int __maybe_unused cs35l33_runtime_resume(struct device *dev)
+{
+ struct cs35l33_private *cs35l33 = dev_get_drvdata(dev);
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (cs35l33->reset_gpio)
+ gpiod_set_value_cansleep(cs35l33->reset_gpio, 0);
+
+ ret = regulator_bulk_enable(cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable core supplies: %d\n", ret);
+ return ret;
+ }
+
+ regcache_cache_only(cs35l33->regmap, false);
+
+ if (cs35l33->reset_gpio)
+ gpiod_set_value_cansleep(cs35l33->reset_gpio, 1);
+
+ msleep(CS35L33_BOOT_DELAY);
+
+ ret = regcache_sync(cs35l33->regmap);
+ if (ret != 0) {
+ dev_err(dev, "Failed to restore register cache\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ regcache_cache_only(cs35l33->regmap, true);
+ regulator_bulk_disable(cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+
+ return ret;
+}
+
+static int __maybe_unused cs35l33_runtime_suspend(struct device *dev)
+{
+ struct cs35l33_private *cs35l33 = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* redo the calibration on the next power up */
+ cs35l33->amp_cal = false;
+
+ regcache_cache_only(cs35l33->regmap, true);
+ regcache_mark_dirty(cs35l33->regmap);
+ regulator_bulk_disable(cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cs35l33_pm_ops = {
+ SET_RUNTIME_PM_OPS(cs35l33_runtime_suspend,
+ cs35l33_runtime_resume,
+ NULL)
+};
+
+static int cs35l33_get_hg_data(const struct device_node *np,
+ struct cs35l33_pdata *pdata)
+{
+ struct device_node *hg;
+ struct cs35l33_hg *hg_config = &pdata->hg_config;
+ u32 val32;
+
+ hg = of_get_child_by_name(np, "cirrus,hg-algo");
+ hg_config->enable_hg_algo = hg ? true : false;
+
+ if (hg_config->enable_hg_algo) {
+ if (of_property_read_u32(hg, "cirrus,mem-depth", &val32) >= 0)
+ hg_config->mem_depth = val32;
+ if (of_property_read_u32(hg, "cirrus,release-rate",
+ &val32) >= 0)
+ hg_config->release_rate = val32;
+ if (of_property_read_u32(hg, "cirrus,ldo-thld", &val32) >= 0)
+ hg_config->ldo_thld = val32;
+ if (of_property_read_u32(hg, "cirrus,ldo-path-disable",
+ &val32) >= 0)
+ hg_config->ldo_path_disable = val32;
+ if (of_property_read_u32(hg, "cirrus,ldo-entry-delay",
+ &val32) >= 0)
+ hg_config->ldo_entry_delay = val32;
+
+ hg_config->vp_hg_auto = of_property_read_bool(hg,
+ "cirrus,vp-hg-auto");
+
+ if (of_property_read_u32(hg, "cirrus,vp-hg", &val32) >= 0)
+ hg_config->vp_hg = val32;
+ if (of_property_read_u32(hg, "cirrus,vp-hg-rate", &val32) >= 0)
+ hg_config->vp_hg_rate = val32;
+ if (of_property_read_u32(hg, "cirrus,vp-hg-va", &val32) >= 0)
+ hg_config->vp_hg_va = val32;
+ }
+
+ of_node_put(hg);
+
+ return 0;
+}
+
+static irqreturn_t cs35l33_irq_thread(int irq, void *data)
+{
+ struct cs35l33_private *cs35l33 = data;
+ struct snd_soc_codec *codec = cs35l33->codec;
+ unsigned int sticky_val1, sticky_val2, current_val, mask1, mask2;
+
+ regmap_read(cs35l33->regmap, CS35L33_INT_STATUS_2,
+ &sticky_val2);
+ regmap_read(cs35l33->regmap, CS35L33_INT_STATUS_1,
+ &sticky_val1);
+ regmap_read(cs35l33->regmap, CS35L33_INT_MASK_2, &mask2);
+ regmap_read(cs35l33->regmap, CS35L33_INT_MASK_1, &mask1);
+
+ /* Check to see if the unmasked bits are active;
+ * if not, then exit.
+ */
+ if (!(sticky_val1 & ~mask1) && !(sticky_val2 & ~mask2))
+ return IRQ_NONE;
+
+ regmap_read(cs35l33->regmap, CS35L33_INT_STATUS_1,
+ &current_val);
+
+ /* handle the interrupts */
+
+ if (sticky_val1 & CS35L33_AMP_SHORT) {
+ dev_crit(codec->dev, "Amp short error\n");
+ if (!(current_val & CS35L33_AMP_SHORT)) {
+ dev_dbg(codec->dev,
+ "Amp short error release\n");
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL,
+ CS35L33_AMP_SHORT_RLS, 0);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL,
+ CS35L33_AMP_SHORT_RLS,
+ CS35L33_AMP_SHORT_RLS);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_AMP_SHORT_RLS,
+ 0);
+ }
+ }
+
+ if (sticky_val1 & CS35L33_CAL_ERR) {
+ dev_err(codec->dev, "Cal error\n");
+
+ /* redo the calibration on the next power up */
+ cs35l33->amp_cal = false;
+
+ if (!(current_val & CS35L33_CAL_ERR)) {
+ dev_dbg(codec->dev, "Cal error release\n");
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_CAL_ERR_RLS,
+ 0);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_CAL_ERR_RLS,
+ CS35L33_CAL_ERR_RLS);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_CAL_ERR_RLS,
+ 0);
+ }
+ }
+
+ if (sticky_val1 & CS35L33_OTE) {
+ dev_crit(codec->dev, "Over temperature error\n");
+ if (!(current_val & CS35L33_OTE)) {
+ dev_dbg(codec->dev,
+ "Over temperature error release\n");
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_OTE_RLS, 0);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_OTE_RLS,
+ CS35L33_OTE_RLS);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_OTE_RLS, 0);
+ }
+ }
+
+ if (sticky_val1 & CS35L33_OTW) {
+ dev_err(codec->dev, "Over temperature warning\n");
+ if (!(current_val & CS35L33_OTW)) {
+ dev_dbg(codec->dev,
+ "Over temperature warning release\n");
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_OTW_RLS, 0);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_OTW_RLS,
+ CS35L33_OTW_RLS);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_OTW_RLS, 0);
+ }
+ }
+
+ if (CS35L33_ALIVE_ERR & sticky_val1)
+ dev_err(codec->dev, "ERROR: ADSPCLK Interrupt\n");
+
+ if (CS35L33_MCLK_ERR & sticky_val1)
+ dev_err(codec->dev, "ERROR: MCLK Interrupt\n");
+
+ if (CS35L33_VMON_OVFL & sticky_val2)
+ dev_err(codec->dev,
+ "ERROR: VMON Overflow Interrupt\n");
+
+ if (CS35L33_IMON_OVFL & sticky_val2)
+ dev_err(codec->dev,
+ "ERROR: IMON Overflow Interrupt\n");
+
+ if (CS35L33_VPMON_OVFL & sticky_val2)
+ dev_err(codec->dev,
+ "ERROR: VPMON Overflow Interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+static const char * const cs35l33_core_supplies[] = {
+ "VA",
+ "VP",
+};
+
+static int cs35l33_of_get_pdata(struct device *dev,
+ struct cs35l33_private *cs35l33)
+{
+ struct device_node *np = dev->of_node;
+ struct cs35l33_pdata *pdata = &cs35l33->pdata;
+ u32 val32;
+
+ if (!np)
+ return 0;
+
+ if (of_property_read_u32(np, "cirrus,boost-ctl", &val32) >= 0) {
+ pdata->boost_ctl = val32;
+ pdata->amp_drv_sel = 1;
+ }
+
+ if (of_property_read_u32(np, "cirrus,ramp-rate", &val32) >= 0) {
+ pdata->ramp_rate = val32;
+ cs35l33->enable_soft_ramp = true;
+ }
+
+ if (of_property_read_u32(np, "cirrus,boost-ipk", &val32) >= 0)
+ pdata->boost_ipk = val32;
+
+ if (of_property_read_u32(np, "cirrus,imon-adc-scale", &val32) >= 0) {
+ if ((val32 == 0x0) || (val32 == 0x7) || (val32 == 0x6))
+ pdata->imon_adc_scale = val32;
+ else
+ /* use default value */
+ pdata->imon_adc_scale = 0x8;
+ } else {
+ /* use default value */
+ pdata->imon_adc_scale = 0x8;
+ }
+
+ cs35l33_get_hg_data(np, pdata);
+
+ return 0;
+}
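A devicetree fragment matching the properties parsed above and in
cs35l33_get_hg_data(), kept as a C comment; all values are illustrative:

	/*
	 * Hedged devicetree sketch (values illustrative):
	 *
	 *	cirrus,boost-ctl = <0x1e>;
	 *	cirrus,ramp-rate = <0x4>;
	 *	cirrus,boost-ipk = <2843750>;
	 *	cirrus,imon-adc-scale = <0x7>;
	 *
	 *	cirrus,hg-algo {
	 *		cirrus,mem-depth = <0x3>;
	 *		cirrus,vp-hg-auto;
	 *	};
	 */
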
+
+static int cs35l33_i2c_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ struct cs35l33_private *cs35l33;
+ struct cs35l33_pdata *pdata = dev_get_platdata(&i2c_client->dev);
+ int ret, devid, i;
+ unsigned int reg;
+
+ cs35l33 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs35l33_private),
+ GFP_KERNEL);
+ if (!cs35l33)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c_client, cs35l33);
+ cs35l33->regmap = devm_regmap_init_i2c(i2c_client, &cs35l33_regmap);
+ if (IS_ERR(cs35l33->regmap)) {
+ ret = PTR_ERR(cs35l33->regmap);
+ dev_err(&i2c_client->dev, "regmap_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ regcache_cache_only(cs35l33->regmap, true);
+
+ for (i = 0; i < ARRAY_SIZE(cs35l33_core_supplies); i++)
+ cs35l33->core_supplies[i].supply
+ = cs35l33_core_supplies[i];
+ cs35l33->num_core_supplies = ARRAY_SIZE(cs35l33_core_supplies);
+
+ ret = devm_regulator_bulk_get(&i2c_client->dev,
+ cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+ if (ret != 0) {
+ dev_err(&i2c_client->dev,
+ "Failed to request core supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (pdata) {
+ cs35l33->pdata = *pdata;
+ } else {
+ cs35l33_of_get_pdata(&i2c_client->dev, cs35l33);
+ pdata = &cs35l33->pdata;
+ }
+
+ ret = devm_request_threaded_irq(&i2c_client->dev, i2c_client->irq, NULL,
+ cs35l33_irq_thread, IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ "cs35l33", cs35l33);
+ if (ret != 0)
+ dev_warn(&i2c_client->dev, "Failed to request IRQ: %d\n", ret);
+
+ /* We could issue !RST or skip it based on AMP topology */
+ cs35l33->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+ "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(cs35l33->reset_gpio)) {
+ dev_err(&i2c_client->dev, "%s ERROR: Can't get reset GPIO\n",
+ __func__);
+ return PTR_ERR(cs35l33->reset_gpio);
+ }
+
+ ret = regulator_bulk_enable(cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+ if (ret != 0) {
+ dev_err(&i2c_client->dev,
+ "Failed to enable core supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (cs35l33->reset_gpio)
+ gpiod_set_value_cansleep(cs35l33->reset_gpio, 1);
+
+ msleep(CS35L33_BOOT_DELAY);
+ regcache_cache_only(cs35l33->regmap, false);
+
+ /* initialize codec */
+ ret = regmap_read(cs35l33->regmap, CS35L33_DEVID_AB, &reg);
+ devid = (reg & 0xFF) << 12;
+ ret = regmap_read(cs35l33->regmap, CS35L33_DEVID_CD, &reg);
+ devid |= (reg & 0xFF) << 4;
+ ret = regmap_read(cs35l33->regmap, CS35L33_DEVID_E, &reg);
+ devid |= (reg & 0xF0) >> 4;
+
+ if (devid != CS35L33_CHIP_ID) {
+ dev_err(&i2c_client->dev,
+ "CS35L33 Device ID (%X). Expected ID %X\n",
+ devid, CS35L33_CHIP_ID);
+ ret = -ENODEV;
+ goto err_enable;
+ }
+
+ ret = regmap_read(cs35l33->regmap, CS35L33_REV_ID, &reg);
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "Get Revision ID failed\n");
+ goto err_enable;
+ }
+
+ dev_info(&i2c_client->dev,
+ "Cirrus Logic CS35L33, Revision: %02X\n", reg & 0xFF);
+
+ ret = regmap_register_patch(cs35l33->regmap,
+ cs35l33_patch, ARRAY_SIZE(cs35l33_patch));
+ if (ret < 0) {
+ dev_err(&i2c_client->dev,
+ "Error in applying regmap patch: %d\n", ret);
+ goto err_enable;
+ }
+
+ /* disable mclk and tdm */
+ regmap_update_bits(cs35l33->regmap, CS35L33_CLK_CTL,
+ CS35L33_MCLKDIS | CS35L33_SDOUT_3ST_TDM,
+ CS35L33_MCLKDIS | CS35L33_SDOUT_3ST_TDM);
+
+ pm_runtime_set_autosuspend_delay(&i2c_client->dev, 100);
+ pm_runtime_use_autosuspend(&i2c_client->dev);
+ pm_runtime_set_active(&i2c_client->dev);
+ pm_runtime_enable(&i2c_client->dev);
+
+ ret = snd_soc_register_codec(&i2c_client->dev,
+ &soc_codec_dev_cs35l33, &cs35l33_dai, 1);
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "%s: Register codec failed\n",
+ __func__);
+ goto err_enable;
+ }
+
+ return 0;
+
+err_enable:
+ regulator_bulk_disable(cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+
+ return ret;
+}
+
+static int cs35l33_i2c_remove(struct i2c_client *client)
+{
+ struct cs35l33_private *cs35l33 = i2c_get_clientdata(client);
+
+ snd_soc_unregister_codec(&client->dev);
+
+ if (cs35l33->reset_gpio)
+ gpiod_set_value_cansleep(cs35l33->reset_gpio, 0);
+
+ pm_runtime_disable(&client->dev);
+ regulator_bulk_disable(cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+
+ return 0;
+}
+
+static const struct of_device_id cs35l33_of_match[] = {
+ { .compatible = "cirrus,cs35l33", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cs35l33_of_match);
+
+static const struct i2c_device_id cs35l33_id[] = {
+ {"cs35l33", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, cs35l33_id);
+
+static struct i2c_driver cs35l33_i2c_driver = {
+ .driver = {
+ .name = "cs35l33",
+ .pm = &cs35l33_pm_ops,
+ .of_match_table = cs35l33_of_match,
+ },
+ .id_table = cs35l33_id,
+ .probe = cs35l33_i2c_probe,
+ .remove = cs35l33_i2c_remove,
+};
+module_i2c_driver(cs35l33_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC CS35L33 driver");
+MODULE_AUTHOR("Paul Handrigan, Cirrus Logic Inc, <paul.handrigan@cirrus.com>");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * cs35l33.h -- CS35L33 ALSA SoC audio driver
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: Paul Handrigan <paul.handrigan@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __CS35L33_H__
+#define __CS35L33_H__
+
+#define CS35L33_CHIP_ID 0x00035A33
+#define CS35L33_DEVID_AB 0x01 /* Device ID A & B [RO] */
+#define CS35L33_DEVID_CD 0x02 /* Device ID C & D [RO] */
+#define CS35L33_DEVID_E 0x03 /* Device ID E [RO] */
+#define CS35L33_FAB_ID 0x04 /* Fab ID [RO] */
+#define CS35L33_REV_ID 0x05 /* Revision ID [RO] */
+#define CS35L33_PWRCTL1 0x06 /* Power Ctl 1 */
+#define CS35L33_PWRCTL2 0x07 /* Power Ctl 2 */
+#define CS35L33_CLK_CTL 0x08 /* Clock Ctl */
+#define CS35L33_BST_PEAK_CTL 0x09 /* Max Current for Boost */
+#define CS35L33_PROTECT_CTL 0x0A /* Amp Protection Parameters */
+#define CS35L33_BST_CTL1 0x0B /* Boost Converter CTL1 */
+#define CS35L33_BST_CTL2 0x0C /* Boost Converter CTL2 */
+#define CS35L33_ADSP_CTL 0x0D /* Serial Port Control */
+#define CS35L33_ADC_CTL 0x0E /* ADC Control */
+#define CS35L33_DAC_CTL 0x0F /* DAC Control */
+#define CS35L33_DIG_VOL_CTL 0x10 /* Digital Volume CTL */
+#define CS35L33_CLASSD_CTL 0x11 /* Class D Amp CTL */
+#define CS35L33_AMP_CTL 0x12 /* Amp Gain/Protecton Release CTL */
+#define CS35L33_INT_MASK_1 0x13 /* Interrupt Mask 1 */
+#define CS35L33_INT_MASK_2 0x14 /* Interrupt Mask 2 */
+#define CS35L33_INT_STATUS_1 0x15 /* Interrupt Status 1 [RO] */
+#define CS35L33_INT_STATUS_2 0x16 /* Interrupt Status 2 [RO] */
+#define CS35L33_DIAG_LOCK 0x17 /* Diagnostic Mode Register Lock */
+#define CS35L33_DIAG_CTRL_1 0x18 /* Diagnostic Mode Register Control */
+#define CS35L33_DIAG_CTRL_2 0x19 /* Diagnostic Mode Register Control 2 */
+#define CS35L33_HG_MEMLDO_CTL 0x23 /* H/G Memory/LDO CTL */
+#define CS35L33_HG_REL_RATE 0x24 /* H/G Release Rate */
+#define CS35L33_LDO_DEL 0x25 /* LDO Entry Delay/VPhg Control 1 */
+#define CS35L33_HG_HEAD 0x29 /* H/G Headroom */
+#define CS35L33_HG_EN 0x2A /* H/G Enable/VPhg CNT2 */
+#define CS35L33_TX_VMON 0x2D /* TDM TX Control 1 (VMON) */
+#define CS35L33_TX_IMON 0x2E /* TDM TX Control 2 (IMON) */
+#define CS35L33_TX_VPMON 0x2F /* TDM TX Control 3 (VPMON) */
+#define CS35L33_TX_VBSTMON 0x30 /* TDM TX Control 4 (VBSTMON) */
+#define CS35L33_TX_FLAG 0x31 /* TDM TX Control 5 (FLAG) */
+#define CS35L33_TX_EN1 0x32 /* TDM TX Enable 1 */
+#define CS35L33_TX_EN2 0x33 /* TDM TX Enable 2 */
+#define CS35L33_TX_EN3 0x34 /* TDM TX Enable 3 */
+#define CS35L33_TX_EN4 0x35 /* TDM TX Enable 4 */
+#define CS35L33_RX_AUD 0x36 /* TDM RX Control 1 */
+#define CS35L33_RX_SPLY 0x37 /* TDM RX Control 2 */
+#define CS35L33_RX_ALIVE 0x38 /* TDM RX Control 3 */
+#define CS35L33_BST_CTL4 0x39 /* Boost Converter Control 4 */
+#define CS35L33_HG_STATUS 0x3F /* H/G Status */
+#define CS35L33_MAX_REGISTER 0x59
+
+#define CS35L33_MCLK_5644 5644800
+#define CS35L33_MCLK_6144 6144000
+#define CS35L33_MCLK_6 6000000
+#define CS35L33_MCLK_11289 11289600
+#define CS35L33_MCLK_12 12000000
+#define CS35L33_MCLK_12288 12288000
+
+/* CS35L33_PWRCTL1 */
+#define CS35L33_PDN_AMP (1 << 7)
+#define CS35L33_PDN_BST (1 << 2)
+#define CS35L33_PDN_ALL 1
+
+/* CS35L33_PWRCTL2 */
+#define CS35L33_PDN_VMON_SHIFT 7
+#define CS35L33_PDN_VMON (1 << CS35L33_PDN_VMON_SHIFT)
+#define CS35L33_PDN_IMON_SHIFT 6
+#define CS35L33_PDN_IMON (1 << CS35L33_PDN_IMON_SHIFT)
+#define CS35L33_PDN_VPMON_SHIFT 5
+#define CS35L33_PDN_VPMON (1 << CS35L33_PDN_VPMON_SHIFT)
+#define CS35L33_PDN_VBSTMON_SHIFT 4
+#define CS35L33_PDN_VBSTMON (1 << CS35L33_PDN_VBSTMON_SHIFT)
+#define CS35L33_SDOUT_3ST_I2S_SHIFT 3
+#define CS35L33_SDOUT_3ST_I2S (1 << CS35L33_SDOUT_3ST_I2S_SHIFT)
+#define CS35L33_PDN_SDIN_SHIFT 2
+#define CS35L33_PDN_SDIN (1 << CS35L33_PDN_SDIN_SHIFT)
+#define CS35L33_PDN_TDM_SHIFT 1
+#define CS35L33_PDN_TDM (1 << CS35L33_PDN_TDM_SHIFT)
+
+/* CS35L33_CLK_CTL */
+#define CS35L33_MCLKDIS (1 << 7)
+#define CS35L33_MCLKDIV2 (1 << 6)
+#define CS35L33_SDOUT_3ST_TDM (1 << 5)
+#define CS35L33_INT_FS_RATE (1 << 4)
+#define CS35L33_ADSP_FS 0xF
+
+/* CS35L33_PROTECT_CTL */
+#define CS35L33_ALIVE_WD_DIS (3 << 2)
+
+/* CS35L33_BST_CTL1 */
+#define CS35L33_BST_CTL_SRC (1 << 6)
+#define CS35L33_BST_CTL_SHIFT (1 << 5)
+#define CS35L33_BST_CTL_MASK 0x3F
+
+/* CS35L33_BST_CTL2 */
+#define CS35L33_TDM_WD_SEL (1 << 4)
+#define CS35L33_ALIVE_WD_DIS2 (1 << 3)
+#define CS35L33_VBST_SR_STEP 0x3
+
+/* CS35L33_ADSP_CTL */
+#define CS35L33_ADSP_DRIVE (1 << 7)
+#define CS35L33_MS_MASK (1 << 6)
+#define CS35L33_SDIN_LOC (3 << 4)
+#define CS35L33_ALIVE_RATE 0x3
+
+/* CS35L33_ADC_CTL */
+#define CS35L33_INV_VMON (1 << 7)
+#define CS35L33_INV_IMON (1 << 6)
+#define CS35L33_ADC_NOTCH_DIS (1 << 5)
+#define CS35L33_IMON_SCALE 0xF
+
+/* CS35L33_DAC_CTL */
+#define CS35L33_INV_DAC (1 << 7)
+#define CS35L33_DAC_NOTCH_DIS (1 << 5)
+#define CS35L33_DIGSFT (1 << 4)
+#define CS35L33_DSR_RATE 0xF
+
+/* CS35L33_CLASSD_CTL */
+#define CS35L33_AMP_SD (1 << 6)
+#define CS35L33_AMP_DRV_SEL_SRC (1 << 5)
+#define CS35L33_AMP_DRV_SEL_MASK 0x10
+#define CS35L33_AMP_DRV_SEL_SHIFT 4
+#define CS35L33_AMP_CAL (1 << 3)
+#define CS35L33_GAIN_CHG_ZC_MASK 0x04
+#define CS35L33_GAIN_CHG_ZC_SHIFT 2
+#define CS35L33_CLASS_D_CTL_MASK 0x3F
+
+/* CS35L33_AMP_CTL */
+#define CS35L33_AMP_GAIN 0xF0
+#define CS35L33_CAL_ERR_RLS (1 << 3)
+#define CS35L33_AMP_SHORT_RLS (1 << 2)
+#define CS35L33_OTW_RLS (1 << 1)
+#define CS35L33_OTE_RLS 1
+
+/* CS35L33_INT_MASK_1 */
+#define CS35L33_M_CAL_ERR_SHIFT 6
+#define CS35L33_M_CAL_ERR (1 << CS35L33_M_CAL_ERR_SHIFT)
+#define CS35L33_M_ALIVE_ERR_SHIFT 5
+#define CS35L33_M_ALIVE_ERR (1 << CS35L33_M_ALIVE_ERR_SHIFT)
+#define CS35L33_M_AMP_SHORT_SHIFT 2
+#define CS35L33_M_AMP_SHORT (1 << CS35L33_M_AMP_SHORT_SHIFT)
+#define CS35L33_M_OTW_SHIFT 1
+#define CS35L33_M_OTW (1 << CS35L33_M_OTW_SHIFT)
+#define CS35L33_M_OTE_SHIFT 0
+#define CS35L33_M_OTE (1 << CS35L33_M_OTE_SHIFT)
+
+/* CS35L33_INT_STATUS_1 */
+#define CS35L33_CAL_ERR (1 << 6)
+#define CS35L33_ALIVE_ERR (1 << 5)
+#define CS35L33_ADSPCLK_ERR (1 << 4)
+#define CS35L33_MCLK_ERR (1 << 3)
+#define CS35L33_AMP_SHORT (1 << 2)
+#define CS35L33_OTW (1 << 1)
+#define CS35L33_OTE (1 << 0)
+
+/* CS35L33_INT_STATUS_2 */
+#define CS35L33_VMON_OVFL (1 << 7)
+#define CS35L33_IMON_OVFL (1 << 6)
+#define CS35L33_VPMON_OVFL (1 << 5)
+#define CS35L33_VBSTMON_OVFL (1 << 4)
+#define CS35L33_PDN_DONE 1
+
+/* CS35L33_BST_CTL4 */
+#define CS35L33_BST_RGS 0x70
+#define CS35L33_BST_COEFF3 0xF
+
+/* CS35L33_HG_MEMLDO_CTL */
+#define CS35L33_MEM_DEPTH_SHIFT 5
+#define CS35L33_MEM_DEPTH_MASK (0x3 << CS35L33_MEM_DEPTH_SHIFT)
+#define CS35L33_LDO_THLD_SHIFT 1
+#define CS35L33_LDO_THLD_MASK (0xF << CS35L33_LDO_THLD_SHIFT)
+#define CS35L33_LDO_DISABLE_SHIFT 0
+#define CS35L33_LDO_DISABLE_MASK (0x1 << CS35L33_LDO_DISABLE_SHIFT)
+
+/* CS35L33_LDO_DEL */
+#define CS35L33_VP_HG_VA_SHIFT 5
+#define CS35L33_VP_HG_VA_MASK (0x7 << CS35L33_VP_HG_VA_SHIFT)
+#define CS35L33_LDO_ENTRY_DELAY_SHIFT 2
+#define CS35L33_LDO_ENTRY_DELAY_MASK (0x7 << CS35L33_LDO_ENTRY_DELAY_SHIFT)
+#define CS35L33_VP_HG_RATE_SHIFT 0
+#define CS35L33_VP_HG_RATE_MASK (0x3 << CS35L33_VP_HG_RATE_SHIFT)
+
+/* CS35L33_HG_HEAD */
+#define CS35L33_HD_RM_SHIFT 0
+#define CS35L33_HD_RM_MASK (0x7F << CS35L33_HD_RM_SHIFT)
+
+/* CS35L33_HG_EN */
+#define CS35L33_CLASS_HG_ENA_SHIFT 7
+#define CS35L33_CLASS_HG_EN_MASK (0x1 << CS35L33_CLASS_HG_ENA_SHIFT)
+#define CS35L33_VP_HG_AUTO_SHIFT 6
+#define CS35L33_VP_HG_AUTO_MASK (0x1 << CS35L33_VP_HG_AUTO_SHIFT)
+#define CS35L33_VP_HG_SHIFT 0
+#define CS35L33_VP_HG_MASK (0x1F << CS35L33_VP_HG_SHIFT)
+
+#define CS35L33_RATES (SNDRV_PCM_RATE_8000_48000)
+#define CS35L33_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+/* CS35L33_{RX,TX}_X */
+#define CS35L33_X_STATE_SHIFT 7
+#define CS35L33_X_STATE (1 << CS35L33_X_STATE_SHIFT)
+#define CS35L33_X_LOC_SHIFT 0
+#define CS35L33_X_LOC (0x1F << CS35L33_X_LOC_SHIFT)
+
+/* CS35L33_RX_AUD */
+#define CS35L33_AUDIN_RX_DEPTH_SHIFT 5
+#define CS35L33_AUDIN_RX_DEPTH (0x7 << CS35L33_AUDIN_RX_DEPTH_SHIFT)
+
+#endif
SND_SOC_DAPM_OUTPUT("DRC1 Signal Activity"),
SND_SOC_DAPM_OUTPUT("DRC2 Signal Activity"),
+SND_SOC_DAPM_OUTPUT("DSP Voice Trigger"),
+
+SND_SOC_DAPM_SWITCH("DSP3 Voice Trigger", SND_SOC_NOPM, 2, 0,
+ &arizona_voice_trigger_switch[2]),
+
SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT,
0, NULL, 0, arizona_in_ev,
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
{ "MICSUPP", NULL, "SYSCLK" },
+ { "DRC1 Signal Activity", NULL, "SYSCLK" },
+ { "DRC2 Signal Activity", NULL, "SYSCLK" },
{ "DRC1 Signal Activity", NULL, "DRC1L" },
{ "DRC1 Signal Activity", NULL, "DRC1R" },
{ "DRC2 Signal Activity", NULL, "DRC2L" },
{ "DRC2 Signal Activity", NULL, "DRC2R" },
+
+ { "DSP Voice Trigger", NULL, "SYSCLK" },
+ { "DSP Voice Trigger", NULL, "DSP3 Voice Trigger" },
+ { "DSP3 Voice Trigger", "Switch", "DSP3" },
};
static int cs47l24_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
{
struct cs47l24_priv *priv = data;
struct arizona *arizona = priv->core.arizona;
+ struct arizona_voice_trigger_info info;
int serviced = 0;
int i, ret;
ret = wm_adsp_compr_handle_irq(&priv->core.adsp[i]);
if (ret != -ENODEV)
serviced++;
+ if (ret == WM_ADSP_COMPR_VOICE_TRIGGER) {
+ info.core = i;
+ arizona_call_notifiers(arizona,
+ ARIZONA_NOTIFY_VOICE_TRIGGER,
+ &info);
+ }
}
if (!serviced) {
arizona_init_spk(codec);
arizona_init_gpio(codec);
arizona_init_mono(codec);
+ arizona_init_notifiers(codec);
ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
"ADSP2 Compressed IRQ", cs47l24_adsp2_irq,
--- /dev/null
+/*
+ * cs53l30.c -- CS53L30 ALSA SoC audio driver
+ *
+ * Copyright 2015 Cirrus Logic, Inc.
+ *
+ * Authors: Paul Handrigan <Paul.Handrigan@cirrus.com>,
+ * Tim Howe <Tim.Howe@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "cs53l30.h"
+
+#define CS53L30_NUM_SUPPLIES 2
+static const char *const cs53l30_supply_names[CS53L30_NUM_SUPPLIES] = {
+ "VA",
+ "VP",
+};
+
+struct cs53l30_private {
+ struct regulator_bulk_data supplies[CS53L30_NUM_SUPPLIES];
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *mute_gpio;
+ struct clk *mclk;
+ bool use_sdout2;
+ u32 mclk_rate;
+};
+
+static const struct reg_default cs53l30_reg_defaults[] = {
+ { CS53L30_PWRCTL, CS53L30_PWRCTL_DEFAULT },
+ { CS53L30_MCLKCTL, CS53L30_MCLKCTL_DEFAULT },
+ { CS53L30_INT_SR_CTL, CS53L30_INT_SR_CTL_DEFAULT },
+ { CS53L30_MICBIAS_CTL, CS53L30_MICBIAS_CTL_DEFAULT },
+ { CS53L30_ASPCFG_CTL, CS53L30_ASPCFG_CTL_DEFAULT },
+ { CS53L30_ASP_CTL1, CS53L30_ASP_CTL1_DEFAULT },
+ { CS53L30_ASP_TDMTX_CTL1, CS53L30_ASP_TDMTX_CTLx_DEFAULT },
+ { CS53L30_ASP_TDMTX_CTL2, CS53L30_ASP_TDMTX_CTLx_DEFAULT },
+ { CS53L30_ASP_TDMTX_CTL3, CS53L30_ASP_TDMTX_CTLx_DEFAULT },
+ { CS53L30_ASP_TDMTX_CTL4, CS53L30_ASP_TDMTX_CTLx_DEFAULT },
+ { CS53L30_ASP_TDMTX_EN1, CS53L30_ASP_TDMTX_ENx_DEFAULT },
+ { CS53L30_ASP_TDMTX_EN2, CS53L30_ASP_TDMTX_ENx_DEFAULT },
+ { CS53L30_ASP_TDMTX_EN3, CS53L30_ASP_TDMTX_ENx_DEFAULT },
+ { CS53L30_ASP_TDMTX_EN4, CS53L30_ASP_TDMTX_ENx_DEFAULT },
+ { CS53L30_ASP_TDMTX_EN5, CS53L30_ASP_TDMTX_ENx_DEFAULT },
+ { CS53L30_ASP_TDMTX_EN6, CS53L30_ASP_TDMTX_ENx_DEFAULT },
+ { CS53L30_ASP_CTL2, CS53L30_ASP_CTL2_DEFAULT },
+ { CS53L30_SFT_RAMP, CS53L30_SFT_RMP_DEFAULT },
+ { CS53L30_LRCK_CTL1, CS53L30_LRCK_CTLx_DEFAULT },
+ { CS53L30_LRCK_CTL2, CS53L30_LRCK_CTLx_DEFAULT },
+ { CS53L30_MUTEP_CTL1, CS53L30_MUTEP_CTL1_DEFAULT },
+ { CS53L30_MUTEP_CTL2, CS53L30_MUTEP_CTL2_DEFAULT },
+ { CS53L30_INBIAS_CTL1, CS53L30_INBIAS_CTL1_DEFAULT },
+ { CS53L30_INBIAS_CTL2, CS53L30_INBIAS_CTL2_DEFAULT },
+ { CS53L30_DMIC1_STR_CTL, CS53L30_DMIC1_STR_CTL_DEFAULT },
+ { CS53L30_DMIC2_STR_CTL, CS53L30_DMIC2_STR_CTL_DEFAULT },
+ { CS53L30_ADCDMIC1_CTL1, CS53L30_ADCDMICx_CTL1_DEFAULT },
+ { CS53L30_ADCDMIC1_CTL2, CS53L30_ADCDMIC1_CTL2_DEFAULT },
+ { CS53L30_ADC1_CTL3, CS53L30_ADCx_CTL3_DEFAULT },
+ { CS53L30_ADC1_NG_CTL, CS53L30_ADCx_NG_CTL_DEFAULT },
+ { CS53L30_ADC1A_AFE_CTL, CS53L30_ADCxy_AFE_CTL_DEFAULT },
+ { CS53L30_ADC1B_AFE_CTL, CS53L30_ADCxy_AFE_CTL_DEFAULT },
+ { CS53L30_ADC1A_DIG_VOL, CS53L30_ADCxy_DIG_VOL_DEFAULT },
+ { CS53L30_ADC1B_DIG_VOL, CS53L30_ADCxy_DIG_VOL_DEFAULT },
+ { CS53L30_ADCDMIC2_CTL1, CS53L30_ADCDMICx_CTL1_DEFAULT },
+ { CS53L30_ADCDMIC2_CTL2, CS53L30_ADCDMIC1_CTL2_DEFAULT },
+ { CS53L30_ADC2_CTL3, CS53L30_ADCx_CTL3_DEFAULT },
+ { CS53L30_ADC2_NG_CTL, CS53L30_ADCx_NG_CTL_DEFAULT },
+ { CS53L30_ADC2A_AFE_CTL, CS53L30_ADCxy_AFE_CTL_DEFAULT },
+ { CS53L30_ADC2B_AFE_CTL, CS53L30_ADCxy_AFE_CTL_DEFAULT },
+ { CS53L30_ADC2A_DIG_VOL, CS53L30_ADCxy_DIG_VOL_DEFAULT },
+ { CS53L30_ADC2B_DIG_VOL, CS53L30_ADCxy_DIG_VOL_DEFAULT },
+ { CS53L30_INT_MASK, CS53L30_DEVICE_INT_MASK },
+};
+
+static bool cs53l30_volatile_register(struct device *dev, unsigned int reg)
+{
+ return reg == CS53L30_IS;
+}
+
+static bool cs53l30_writeable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS53L30_DEVID_AB:
+ case CS53L30_DEVID_CD:
+ case CS53L30_DEVID_E:
+ case CS53L30_REVID:
+ case CS53L30_IS:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool cs53l30_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS53L30_DEVID_AB:
+ case CS53L30_DEVID_CD:
+ case CS53L30_DEVID_E:
+ case CS53L30_REVID:
+ case CS53L30_PWRCTL:
+ case CS53L30_MCLKCTL:
+ case CS53L30_INT_SR_CTL:
+ case CS53L30_MICBIAS_CTL:
+ case CS53L30_ASPCFG_CTL:
+ case CS53L30_ASP_CTL1:
+ case CS53L30_ASP_TDMTX_CTL1:
+ case CS53L30_ASP_TDMTX_CTL2:
+ case CS53L30_ASP_TDMTX_CTL3:
+ case CS53L30_ASP_TDMTX_CTL4:
+ case CS53L30_ASP_TDMTX_EN1:
+ case CS53L30_ASP_TDMTX_EN2:
+ case CS53L30_ASP_TDMTX_EN3:
+ case CS53L30_ASP_TDMTX_EN4:
+ case CS53L30_ASP_TDMTX_EN5:
+ case CS53L30_ASP_TDMTX_EN6:
+ case CS53L30_ASP_CTL2:
+ case CS53L30_SFT_RAMP:
+ case CS53L30_LRCK_CTL1:
+ case CS53L30_LRCK_CTL2:
+ case CS53L30_MUTEP_CTL1:
+ case CS53L30_MUTEP_CTL2:
+ case CS53L30_INBIAS_CTL1:
+ case CS53L30_INBIAS_CTL2:
+ case CS53L30_DMIC1_STR_CTL:
+ case CS53L30_DMIC2_STR_CTL:
+ case CS53L30_ADCDMIC1_CTL1:
+ case CS53L30_ADCDMIC1_CTL2:
+ case CS53L30_ADC1_CTL3:
+ case CS53L30_ADC1_NG_CTL:
+ case CS53L30_ADC1A_AFE_CTL:
+ case CS53L30_ADC1B_AFE_CTL:
+ case CS53L30_ADC1A_DIG_VOL:
+ case CS53L30_ADC1B_DIG_VOL:
+ case CS53L30_ADCDMIC2_CTL1:
+ case CS53L30_ADCDMIC2_CTL2:
+ case CS53L30_ADC2_CTL3:
+ case CS53L30_ADC2_NG_CTL:
+ case CS53L30_ADC2A_AFE_CTL:
+ case CS53L30_ADC2B_AFE_CTL:
+ case CS53L30_ADC2A_DIG_VOL:
+ case CS53L30_ADC2B_DIG_VOL:
+ case CS53L30_INT_MASK:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static DECLARE_TLV_DB_SCALE(adc_boost_tlv, 0, 2000, 0);
+static DECLARE_TLV_DB_SCALE(adc_ng_boost_tlv, 0, 3000, 0);
+static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
+static DECLARE_TLV_DB_SCALE(dig_tlv, -9600, 100, 1);
+static DECLARE_TLV_DB_SCALE(pga_preamp_tlv, 0, 10000, 0);
+
+static const char * const input1_sel_text[] = {
+ "DMIC1 On AB In",
+ "DMIC1 On A In",
+ "DMIC1 On B In",
+ "ADC1 On AB In",
+ "ADC1 On A In",
+ "ADC1 On B In",
+ "DMIC1 Off ADC1 Off",
+};
+
+static unsigned int const input1_sel_values[] = {
+ CS53L30_CH_TYPE,
+ CS53L30_ADCxB_PDN | CS53L30_CH_TYPE,
+ CS53L30_ADCxA_PDN | CS53L30_CH_TYPE,
+ CS53L30_DMICx_PDN,
+ CS53L30_ADCxB_PDN | CS53L30_DMICx_PDN,
+ CS53L30_ADCxA_PDN | CS53L30_DMICx_PDN,
+ CS53L30_ADCxA_PDN | CS53L30_ADCxB_PDN | CS53L30_DMICx_PDN,
+};
+
+static const char * const input2_sel_text[] = {
+ "DMIC2 On AB In",
+ "DMIC2 On A In",
+ "DMIC2 On B In",
+ "ADC2 On AB In",
+ "ADC2 On A In",
+ "ADC2 On B In",
+ "DMIC2 Off ADC2 Off",
+};
+
+static unsigned int const input2_sel_values[] = {
+ 0x0,
+ CS53L30_ADCxB_PDN,
+ CS53L30_ADCxA_PDN,
+ CS53L30_DMICx_PDN,
+ CS53L30_ADCxB_PDN | CS53L30_DMICx_PDN,
+ CS53L30_ADCxA_PDN | CS53L30_DMICx_PDN,
+ CS53L30_ADCxA_PDN | CS53L30_ADCxB_PDN | CS53L30_DMICx_PDN,
+};
+
+static const char * const input1_route_sel_text[] = {
+ "ADC1_SEL", "DMIC1_SEL",
+};
+
+static const struct soc_enum input1_route_sel_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADCDMIC1_CTL1, CS53L30_CH_TYPE_SHIFT,
+ ARRAY_SIZE(input1_route_sel_text),
+ input1_route_sel_text);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(input1_sel_enum, CS53L30_ADCDMIC1_CTL1, 0,
+ CS53L30_ADCDMICx_PDN_MASK, input1_sel_text,
+ input1_sel_values);
+
+static const struct snd_kcontrol_new input1_route_sel_mux =
+ SOC_DAPM_ENUM("Input 1 Route", input1_route_sel_enum);
+
+static const char * const input2_route_sel_text[] = {
+ "ADC2_SEL", "DMIC2_SEL",
+};
+
+/* Note: CS53L30_ADCDMIC1_CTL1 CH_TYPE controls inputs 1 and 2 */
+static const struct soc_enum input2_route_sel_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADCDMIC1_CTL1, 0,
+ ARRAY_SIZE(input2_route_sel_text),
+ input2_route_sel_text);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(input2_sel_enum, CS53L30_ADCDMIC2_CTL1, 0,
+ CS53L30_ADCDMICx_PDN_MASK, input2_sel_text,
+ input2_sel_values);
+
+static const struct snd_kcontrol_new input2_route_sel_mux =
+ SOC_DAPM_ENUM("Input 2 Route", input2_route_sel_enum);
+
+/*
+ * TB = 6144*(MCLK(int) scaling factor)/MCLK(internal)
+ * TB - Time base
+ * NOTE: If MCLK_INT_SCALE = 0, then TB=1
+ */
+static const char * const cs53l30_ng_delay_text[] = {
+ "TB*50ms", "TB*100ms", "TB*150ms", "TB*200ms",
+};
+
+static const struct soc_enum adc1_ng_delay_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADC1_NG_CTL, CS53L30_ADCx_NG_DELAY_SHIFT,
+ ARRAY_SIZE(cs53l30_ng_delay_text),
+ cs53l30_ng_delay_text);
+
+static const struct soc_enum adc2_ng_delay_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADC2_NG_CTL, CS53L30_ADCx_NG_DELAY_SHIFT,
+ ARRAY_SIZE(cs53l30_ng_delay_text),
+ cs53l30_ng_delay_text);
+
+/* The noise gate threshold selected will depend on NG Boost */
+static const char * const cs53l30_ng_thres_text[] = {
+ "-64dB/-34dB", "-66dB/-36dB", "-70dB/-40dB", "-73dB/-43dB",
+ "-76dB/-46dB", "-82dB/-52dB", "-58dB", "-64dB",
+};
+
+static const struct soc_enum adc1_ng_thres_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADC1_NG_CTL, CS53L30_ADCx_NG_THRESH_SHIFT,
+ ARRAY_SIZE(cs53l30_ng_thres_text),
+ cs53l30_ng_thres_text);
+
+static const struct soc_enum adc2_ng_thres_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADC2_NG_CTL, CS53L30_ADCx_NG_THRESH_SHIFT,
+ ARRAY_SIZE(cs53l30_ng_thres_text),
+ cs53l30_ng_thres_text);
+
+/* Corner frequencies are with an Fs of 48kHz. */
+static const char * const hpf_corner_freq_text[] = {
+ "1.86Hz", "120Hz", "235Hz", "466Hz",
+};
+
+static const struct soc_enum adc1_hpf_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADC1_CTL3, CS53L30_ADCx_HPF_CF_SHIFT,
+ ARRAY_SIZE(hpf_corner_freq_text), hpf_corner_freq_text);
+
+static const struct soc_enum adc2_hpf_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADC2_CTL3, CS53L30_ADCx_HPF_CF_SHIFT,
+ ARRAY_SIZE(hpf_corner_freq_text), hpf_corner_freq_text);
+
+static const struct snd_kcontrol_new cs53l30_snd_controls[] = {
+ SOC_SINGLE("Digital Soft-Ramp Switch", CS53L30_SFT_RAMP,
+ CS53L30_DIGSFT_SHIFT, 1, 0),
+ SOC_SINGLE("ADC1 Noise Gate Ganging Switch", CS53L30_ADC1_CTL3,
+ CS53L30_ADCx_NG_ALL_SHIFT, 1, 0),
+ SOC_SINGLE("ADC2 Noise Gate Ganging Switch", CS53L30_ADC2_CTL3,
+ CS53L30_ADCx_NG_ALL_SHIFT, 1, 0),
+ SOC_SINGLE("ADC1A Noise Gate Enable Switch", CS53L30_ADC1_NG_CTL,
+ CS53L30_ADCxA_NG_SHIFT, 1, 0),
+ SOC_SINGLE("ADC1B Noise Gate Enable Switch", CS53L30_ADC1_NG_CTL,
+ CS53L30_ADCxB_NG_SHIFT, 1, 0),
+ SOC_SINGLE("ADC2A Noise Gate Enable Switch", CS53L30_ADC2_NG_CTL,
+ CS53L30_ADCxA_NG_SHIFT, 1, 0),
+ SOC_SINGLE("ADC2B Noise Gate Enable Switch", CS53L30_ADC2_NG_CTL,
+ CS53L30_ADCxB_NG_SHIFT, 1, 0),
+ SOC_SINGLE("ADC1 Notch Filter Switch", CS53L30_ADCDMIC1_CTL2,
+ CS53L30_ADCx_NOTCH_DIS_SHIFT, 1, 1),
+ SOC_SINGLE("ADC2 Notch Filter Switch", CS53L30_ADCDMIC2_CTL2,
+ CS53L30_ADCx_NOTCH_DIS_SHIFT, 1, 1),
+ SOC_SINGLE("ADC1A Invert Switch", CS53L30_ADCDMIC1_CTL2,
+ CS53L30_ADCxA_INV_SHIFT, 1, 0),
+ SOC_SINGLE("ADC1B Invert Switch", CS53L30_ADCDMIC1_CTL2,
+ CS53L30_ADCxB_INV_SHIFT, 1, 0),
+ SOC_SINGLE("ADC2A Invert Switch", CS53L30_ADCDMIC2_CTL2,
+ CS53L30_ADCxA_INV_SHIFT, 1, 0),
+ SOC_SINGLE("ADC2B Invert Switch", CS53L30_ADCDMIC2_CTL2,
+ CS53L30_ADCxB_INV_SHIFT, 1, 0),
+
+ SOC_SINGLE_TLV("ADC1A Digital Boost Volume", CS53L30_ADCDMIC1_CTL2,
+ CS53L30_ADCxA_DIG_BOOST_SHIFT, 1, 0, adc_boost_tlv),
+ SOC_SINGLE_TLV("ADC1B Digital Boost Volume", CS53L30_ADCDMIC1_CTL2,
+ CS53L30_ADCxB_DIG_BOOST_SHIFT, 1, 0, adc_boost_tlv),
+ SOC_SINGLE_TLV("ADC2A Digital Boost Volume", CS53L30_ADCDMIC2_CTL2,
+ CS53L30_ADCxA_DIG_BOOST_SHIFT, 1, 0, adc_boost_tlv),
+ SOC_SINGLE_TLV("ADC2B Digital Boost Volume", CS53L30_ADCDMIC2_CTL2,
+ CS53L30_ADCxB_DIG_BOOST_SHIFT, 1, 0, adc_boost_tlv),
+ SOC_SINGLE_TLV("ADC1 NG Boost Volume", CS53L30_ADC1_NG_CTL,
+ CS53L30_ADCx_NG_BOOST_SHIFT, 1, 0, adc_ng_boost_tlv),
+ SOC_SINGLE_TLV("ADC2 NG Boost Volume", CS53L30_ADC2_NG_CTL,
+ CS53L30_ADCx_NG_BOOST_SHIFT, 1, 0, adc_ng_boost_tlv),
+
+ SOC_DOUBLE_R_TLV("ADC1 Preamplifier Volume", CS53L30_ADC1A_AFE_CTL,
+ CS53L30_ADC1B_AFE_CTL, CS53L30_ADCxy_PREAMP_SHIFT,
+ 2, 0, pga_preamp_tlv),
+ SOC_DOUBLE_R_TLV("ADC2 Preamplifier Volume", CS53L30_ADC2A_AFE_CTL,
+ CS53L30_ADC2B_AFE_CTL, CS53L30_ADCxy_PREAMP_SHIFT,
+ 2, 0, pga_preamp_tlv),
+
+ SOC_ENUM("Input 1 Channel Select", input1_sel_enum),
+ SOC_ENUM("Input 2 Channel Select", input2_sel_enum),
+
+ SOC_ENUM("ADC1 HPF Select", adc1_hpf_enum),
+ SOC_ENUM("ADC2 HPF Select", adc2_hpf_enum),
+ SOC_ENUM("ADC1 NG Threshold", adc1_ng_thres_enum),
+ SOC_ENUM("ADC2 NG Threshold", adc2_ng_thres_enum),
+ SOC_ENUM("ADC1 NG Delay", adc1_ng_delay_enum),
+ SOC_ENUM("ADC2 NG Delay", adc2_ng_delay_enum),
+
+ SOC_SINGLE_SX_TLV("ADC1A PGA Volume",
+ CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+ SOC_SINGLE_SX_TLV("ADC1B PGA Volume",
+ CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+ SOC_SINGLE_SX_TLV("ADC2A PGA Volume",
+ CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+ SOC_SINGLE_SX_TLV("ADC2B PGA Volume",
+ CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+
+ SOC_SINGLE_SX_TLV("ADC1A Digital Volume",
+ CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+ SOC_SINGLE_SX_TLV("ADC1B Digital Volume",
+ CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+ SOC_SINGLE_SX_TLV("ADC2A Digital Volume",
+ CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+ SOC_SINGLE_SX_TLV("ADC2B Digital Volume",
+ CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+};
+
+static const struct snd_soc_dapm_widget cs53l30_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("IN1_DMIC1"),
+ SND_SOC_DAPM_INPUT("IN2"),
+ SND_SOC_DAPM_INPUT("IN3_DMIC2"),
+ SND_SOC_DAPM_INPUT("IN4"),
+ SND_SOC_DAPM_SUPPLY("MIC1 Bias", CS53L30_MICBIAS_CTL,
+ CS53L30_MIC1_BIAS_PDN_SHIFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MIC2 Bias", CS53L30_MICBIAS_CTL,
+ CS53L30_MIC2_BIAS_PDN_SHIFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MIC3 Bias", CS53L30_MICBIAS_CTL,
+ CS53L30_MIC3_BIAS_PDN_SHIFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MIC4 Bias", CS53L30_MICBIAS_CTL,
+ CS53L30_MIC4_BIAS_PDN_SHIFT, 1, NULL, 0),
+
+ SND_SOC_DAPM_AIF_OUT("ASP_SDOUT1", NULL, 0, CS53L30_ASP_CTL1,
+ CS53L30_ASP_SDOUTx_PDN_SHIFT, 1),
+ SND_SOC_DAPM_AIF_OUT("ASP_SDOUT2", NULL, 0, CS53L30_ASP_CTL2,
+ CS53L30_ASP_SDOUTx_PDN_SHIFT, 1),
+
+ SND_SOC_DAPM_MUX("Input Mux 1", SND_SOC_NOPM, 0, 0,
+ &input1_route_sel_mux),
+ SND_SOC_DAPM_MUX("Input Mux 2", SND_SOC_NOPM, 0, 0,
+ &input2_route_sel_mux),
+
+ SND_SOC_DAPM_ADC("ADC1A", NULL, CS53L30_ADCDMIC1_CTL1,
+ CS53L30_ADCxA_PDN_SHIFT, 1),
+ SND_SOC_DAPM_ADC("ADC1B", NULL, CS53L30_ADCDMIC1_CTL1,
+ CS53L30_ADCxB_PDN_SHIFT, 1),
+ SND_SOC_DAPM_ADC("ADC2A", NULL, CS53L30_ADCDMIC2_CTL1,
+ CS53L30_ADCxA_PDN_SHIFT, 1),
+ SND_SOC_DAPM_ADC("ADC2B", NULL, CS53L30_ADCDMIC2_CTL1,
+ CS53L30_ADCxB_PDN_SHIFT, 1),
+ SND_SOC_DAPM_ADC("DMIC1", NULL, CS53L30_ADCDMIC1_CTL1,
+ CS53L30_DMICx_PDN_SHIFT, 1),
+ SND_SOC_DAPM_ADC("DMIC2", NULL, CS53L30_ADCDMIC2_CTL1,
+ CS53L30_DMICx_PDN_SHIFT, 1),
+};
+
+static const struct snd_soc_dapm_route cs53l30_dapm_routes[] = {
+ /* ADC Input Paths */
+ {"ADC1A", NULL, "IN1_DMIC1"},
+ {"Input Mux 1", "ADC1_SEL", "ADC1A"},
+ {"ADC1B", NULL, "IN2"},
+
+ {"ADC2A", NULL, "IN3_DMIC2"},
+ {"Input Mux 2", "ADC2_SEL", "ADC2A"},
+ {"ADC2B", NULL, "IN4"},
+
+ /* MIC Bias Paths */
+ {"ADC1A", NULL, "MIC1 Bias"},
+ {"ADC1B", NULL, "MIC2 Bias"},
+ {"ADC2A", NULL, "MIC3 Bias"},
+ {"ADC2B", NULL, "MIC4 Bias"},
+
+ /* DMIC Paths */
+ {"DMIC1", NULL, "IN1_DMIC1"},
+ {"Input Mux 1", "DMIC1_SEL", "DMIC1"},
+
+ {"DMIC2", NULL, "IN3_DMIC2"},
+ {"Input Mux 2", "DMIC2_SEL", "DMIC2"},
+};
+
+static const struct snd_soc_dapm_route cs53l30_dapm_routes_sdout1[] = {
+ /* Output Paths when using SDOUT1 only */
+ {"ASP_SDOUT1", NULL, "ADC1A" },
+ {"ASP_SDOUT1", NULL, "Input Mux 1"},
+ {"ASP_SDOUT1", NULL, "ADC1B"},
+
+ {"ASP_SDOUT1", NULL, "ADC2A"},
+ {"ASP_SDOUT1", NULL, "Input Mux 2"},
+ {"ASP_SDOUT1", NULL, "ADC2B"},
+
+ {"Capture", NULL, "ASP_SDOUT1"},
+};
+
+static const struct snd_soc_dapm_route cs53l30_dapm_routes_sdout2[] = {
+ /* Output Paths when using both SDOUT1 and SDOUT2 */
+ {"ASP_SDOUT1", NULL, "ADC1A" },
+ {"ASP_SDOUT1", NULL, "Input Mux 1"},
+ {"ASP_SDOUT1", NULL, "ADC1B"},
+
+ {"ASP_SDOUT2", NULL, "ADC2A"},
+ {"ASP_SDOUT2", NULL, "Input Mux 2"},
+ {"ASP_SDOUT2", NULL, "ADC2B"},
+
+ {"Capture", NULL, "ASP_SDOUT1"},
+ {"Capture", NULL, "ASP_SDOUT2"},
+};
+
+struct cs53l30_mclk_div {
+ u32 mclk_rate;
+ u32 srate;
+ u8 asp_rate;
+ u8 internal_fs_ratio;
+ u8 mclk_int_scale;
+};
+
+static const struct cs53l30_mclk_div cs53l30_mclk_coeffs[] = {
+ /* NOTE: Enable MCLK_INT_SCALE to save power. */
+
+ /* MCLK, Sample Rate, asp_rate, internal_fs_ratio, mclk_int_scale */
+ {5644800, 11025, 0x4, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {5644800, 22050, 0x8, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {5644800, 44100, 0xC, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+
+ {6000000, 8000, 0x1, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 11025, 0x2, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 12000, 0x4, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 16000, 0x5, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 22050, 0x6, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 24000, 0x8, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 32000, 0x9, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 44100, 0xA, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 48000, 0xC, 0, CS53L30_MCLK_INT_SCALE},
+
+ {6144000, 8000, 0x1, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 11025, 0x2, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 12000, 0x4, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 16000, 0x5, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 22050, 0x6, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 24000, 0x8, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 32000, 0x9, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 44100, 0xA, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 48000, 0xC, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+
+ {6400000, 8000, 0x1, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 11025, 0x2, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 12000, 0x4, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 16000, 0x5, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 22050, 0x6, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 24000, 0x8, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 32000, 0x9, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 44100, 0xA, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 48000, 0xC, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+};
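+/*
+ * Example lookup: a 6.144 MHz internal MCLK at 48 kHz selects the row
+ * {6144000, 48000, 0xC, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ * i.e. ASP rate code 0xC with the FS ratio and MCLK scaling enabled.
+ */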
+
+struct cs53l30_mclkx_div {
+ u32 mclkx;
+ u8 ratio;
+ u8 mclkdiv;
+};
+
+static const struct cs53l30_mclkx_div cs53l30_mclkx_coeffs[] = {
+ {5644800, 1, CS53L30_MCLK_DIV_BY_1},
+ {6000000, 1, CS53L30_MCLK_DIV_BY_1},
+ {6144000, 1, CS53L30_MCLK_DIV_BY_1},
+ {11289600, 2, CS53L30_MCLK_DIV_BY_2},
+ {12288000, 2, CS53L30_MCLK_DIV_BY_2},
+ {12000000, 2, CS53L30_MCLK_DIV_BY_2},
+ {19200000, 3, CS53L30_MCLK_DIV_BY_3},
+};
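+/*
+ * Example: a 12.288 MHz MCLKX input uses CS53L30_MCLK_DIV_BY_2, giving the
+ * 6.144 MHz internal MCLK that cs53l30_set_sysclk() stores in mclk_rate.
+ */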
+
+static int cs53l30_get_mclkx_coeff(int mclkx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs53l30_mclkx_coeffs); i++) {
+ if (cs53l30_mclkx_coeffs[i].mclkx == mclkx)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int cs53l30_get_mclk_coeff(int mclk_rate, int srate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs53l30_mclk_coeffs); i++) {
+ if (cs53l30_mclk_coeffs[i].mclk_rate == mclk_rate &&
+ cs53l30_mclk_coeffs[i].srate == srate)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int cs53l30_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(dai->codec);
+ int mclkx_coeff;
+ u32 mclk_rate;
+
+ /* MCLKX -> MCLK */
+ mclkx_coeff = cs53l30_get_mclkx_coeff(freq);
+ if (mclkx_coeff < 0)
+ return mclkx_coeff;
+
+ mclk_rate = cs53l30_mclkx_coeffs[mclkx_coeff].mclkx /
+ cs53l30_mclkx_coeffs[mclkx_coeff].ratio;
+
+ regmap_update_bits(priv->regmap, CS53L30_MCLKCTL,
+ CS53L30_MCLK_DIV_MASK,
+ cs53l30_mclkx_coeffs[mclkx_coeff].mclkdiv);
+
+ priv->mclk_rate = mclk_rate;
+
+ return 0;
+}
+
+static int cs53l30_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(dai->codec);
+ u8 aspcfg = 0, aspctl1 = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ aspcfg |= CS53L30_ASP_MS;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* DAI mode */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ /* Set TDM_PDN to turn off TDM mode -- Reset default */
+ aspctl1 |= CS53L30_ASP_TDM_PDN;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ /*
+ * Clear TDM_PDN to turn on TDM mode; use ASP_SCLK_INV = 0
+ * together with SHIFT_LEFT = 1, as shown in Figure 4-13 of
+ * the CS53L30 datasheet.
+ */
+ aspctl1 |= CS53L30_SHIFT_LEFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Check to see if the SCLK is inverted */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_IB_NF:
+ case SND_SOC_DAIFMT_IB_IF:
+ aspcfg ^= CS53L30_ASP_SCLK_INV;
+ break;
+ default:
+ break;
+ }
+
+ regmap_update_bits(priv->regmap, CS53L30_ASPCFG_CTL,
+ CS53L30_ASP_MS | CS53L30_ASP_SCLK_INV, aspcfg);
+
+ regmap_update_bits(priv->regmap, CS53L30_ASP_CTL1,
+ CS53L30_ASP_TDM_PDN | CS53L30_SHIFT_LEFT, aspctl1);
+
+ return 0;
+}
+
+static int cs53l30_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(dai->codec);
+ int srate = params_rate(params);
+ int mclk_coeff;
+
+ /* MCLK -> srate */
+ mclk_coeff = cs53l30_get_mclk_coeff(priv->mclk_rate, srate);
+ if (mclk_coeff < 0)
+ return -EINVAL;
+
+ regmap_update_bits(priv->regmap, CS53L30_INT_SR_CTL,
+ CS53L30_INTRNL_FS_RATIO_MASK,
+ cs53l30_mclk_coeffs[mclk_coeff].internal_fs_ratio);
+
+ regmap_update_bits(priv->regmap, CS53L30_MCLKCTL,
+ CS53L30_MCLK_INT_SCALE_MASK,
+ cs53l30_mclk_coeffs[mclk_coeff].mclk_int_scale);
+
+ regmap_update_bits(priv->regmap, CS53L30_ASPCFG_CTL,
+ CS53L30_ASP_RATE_MASK,
+ cs53l30_mclk_coeffs[mclk_coeff].asp_rate);
+
+ return 0;
+}
+
+static int cs53l30_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg;
+ int i, inter_max_check, ret;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ if (dapm->bias_level == SND_SOC_BIAS_STANDBY)
+ regmap_update_bits(priv->regmap, CS53L30_PWRCTL,
+ CS53L30_PDN_LP_MASK, 0);
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (dapm->bias_level == SND_SOC_BIAS_OFF) {
+ ret = clk_prepare_enable(priv->mclk);
+ if (ret) {
+ dev_err(codec->dev,
+ "failed to enable MCLK: %d\n", ret);
+ return ret;
+ }
+ regmap_update_bits(priv->regmap, CS53L30_MCLKCTL,
+ CS53L30_MCLK_DIS_MASK, 0);
+ regmap_update_bits(priv->regmap, CS53L30_PWRCTL,
+ CS53L30_PDN_ULP_MASK, 0);
+ msleep(50);
+ } else {
+ regmap_update_bits(priv->regmap, CS53L30_PWRCTL,
+ CS53L30_PDN_ULP_MASK,
+ CS53L30_PDN_ULP);
+ }
+ break;
+ case SND_SOC_BIAS_OFF:
+ regmap_update_bits(priv->regmap, CS53L30_INT_MASK,
+ CS53L30_PDN_DONE, 0);
+ /*
+ * If digital softramp is set, the amount of time required
+ * for power down increases and depends on the digital
+ * volume setting.
+ */
+
+ /* Set the max possible time if digsft is set */
+ regmap_read(priv->regmap, CS53L30_SFT_RAMP, &reg);
+ if (reg & CS53L30_DIGSFT_MASK)
+ inter_max_check = CS53L30_PDN_POLL_MAX;
+ else
+ inter_max_check = 10;
+
+ regmap_update_bits(priv->regmap, CS53L30_PWRCTL,
+ CS53L30_PDN_ULP_MASK,
+ CS53L30_PDN_ULP);
+ /* PDN_DONE takes a minimum of 20 ms to be set. */
+ msleep(20);
+ /* Clear the status register */
+ regmap_read(priv->regmap, CS53L30_IS, &reg);
+ for (i = 0; i < inter_max_check; i++) {
+ /* inter_max_check is 10 or 90; use the fast 1 ms poll for 10 */
+ if (inter_max_check <= 10) {
+ usleep_range(1000, 1100);
+ regmap_read(priv->regmap, CS53L30_IS, &reg);
+ if (reg & CS53L30_PDN_DONE)
+ break;
+ } else {
+ usleep_range(10000, 10100);
+ regmap_read(priv->regmap, CS53L30_IS, &reg);
+ if (reg & CS53L30_PDN_DONE)
+ break;
+ }
+ }
+ /* PDN_DONE is set. We now can disable the MCLK */
+ regmap_update_bits(priv->regmap, CS53L30_INT_MASK,
+ CS53L30_PDN_DONE, CS53L30_PDN_DONE);
+ regmap_update_bits(priv->regmap, CS53L30_MCLKCTL,
+ CS53L30_MCLK_DIS_MASK,
+ CS53L30_MCLK_DIS);
+ clk_disable_unprepare(priv->mclk);
+ break;
+ }
+
+ return 0;
+}
+
+static int cs53l30_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(dai->codec);
+ u8 val = tristate ? CS53L30_ASP_3ST : 0;
+
+ return regmap_update_bits(priv->regmap, CS53L30_ASP_CTL1,
+ CS53L30_ASP_3ST_MASK, val);
+}
+
+static const unsigned int cs53l30_src_rates[] = {
+ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
+};
+
+static const struct snd_pcm_hw_constraint_list src_constraints = {
+ .count = ARRAY_SIZE(cs53l30_src_rates),
+ .list = cs53l30_src_rates,
+};
+
+static int cs53l30_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &src_constraints);
+
+ return 0;
+}
+
+/*
+ * Note: CS53L30 counts the slot number per byte while ASoC counts the slot
+ * number per slot_width. So there is a difference between the slots of ASoC
+ * and the slots of CS53L30.
+ */
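+/*
+ * Worked example (illustrative): with slot_width = 16, slot_step is 2 bytes,
+ * so ASoC slot 1 becomes CS53L30 location 2 and enables byte slots 2 and 3.
+ */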
+static int cs53l30_set_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(dai->codec);
+ unsigned int loc[CS53L30_TDM_SLOT_MAX] = {48, 48, 48, 48};
+ unsigned int slot_next, slot_step;
+ u64 tx_enable = 0;
+ int i;
+
+ if (!rx_mask) {
+ dev_err(dai->dev, "rx masks must not be 0\n");
+ return -EINVAL;
+ }
+
+ /* Assume slot_width is no greater than 64 bits */
+ if (slots <= 0 || slot_width <= 0 || slot_width > 64) {
+ dev_err(dai->dev, "invalid slot number or slot width\n");
+ return -EINVAL;
+ }
+
+ if (slot_width & 0x7) {
+ dev_err(dai->dev, "slot width must count in byte\n");
+ return -EINVAL;
+ }
+
+ /* How many bytes in each ASoC slot */
+ slot_step = slot_width >> 3;
+
+ for (i = 0; rx_mask && i < CS53L30_TDM_SLOT_MAX; i++) {
+ /* Find the first slot from LSB */
+ slot_next = __ffs(rx_mask);
+ /* Save the slot location by converting to CS53L30 slot */
+ loc[i] = slot_next * slot_step;
+ /* Create the mask of CS53L30 slot */
+ tx_enable |= (u64)((u64)(1 << slot_step) - 1) << (u64)loc[i];
+ /* Clear this slot from rx_mask */
+ rx_mask &= ~(1 << slot_next);
+ }
+
+ /* Error out to avoid slot shift */
+ if (rx_mask && i == CS53L30_TDM_SLOT_MAX) {
+ dev_err(dai->dev, "rx_mask exceeds max slot number: %d\n",
+ CS53L30_TDM_SLOT_MAX);
+ return -EINVAL;
+ }
+
+ /* Validate the last active CS53L30 slot */
+ slot_next = loc[i - 1] + slot_step - 1;
+ if (slot_next > 47) {
+ dev_err(dai->dev, "slot selection out of bounds: %u\n",
+ slot_next);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CS53L30_TDM_SLOT_MAX && loc[i] != 48; i++) {
+ regmap_update_bits(priv->regmap, CS53L30_ASP_TDMTX_CTL(i),
+ CS53L30_ASP_CHx_TX_LOC_MASK, loc[i]);
+ dev_dbg(dai->dev, "loc[%d]=%x\n", i, loc[i]);
+ }
+
+ for (i = 0; i < CS53L30_ASP_TDMTX_ENx_MAX && tx_enable; i++) {
+ regmap_write(priv->regmap, CS53L30_ASP_TDMTX_ENx(i),
+ tx_enable & 0xff);
+ /* Log the byte just written before shifting to the next one */
+ dev_dbg(dai->dev, "en_reg=%x, tx_enable=%llx\n",
+ CS53L30_ASP_TDMTX_ENx(i), tx_enable & 0xff);
+ tx_enable >>= 8;
+ }
+
+ return 0;
+}
+
+static int cs53l30_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(dai->codec);
+
+ if (priv->mute_gpio)
+ gpiod_set_value_cansleep(priv->mute_gpio, mute);
+
+ return 0;
+}
+
+/* SNDRV_PCM_RATE_KNOT adds 12000 and 24000 Hz; limited by the constraint list */
+#define CS53L30_RATES (SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT)
+
+#define CS53L30_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+static const struct snd_soc_dai_ops cs53l30_ops = {
+ .startup = cs53l30_pcm_startup,
+ .hw_params = cs53l30_pcm_hw_params,
+ .set_fmt = cs53l30_set_dai_fmt,
+ .set_sysclk = cs53l30_set_sysclk,
+ .set_tristate = cs53l30_set_tristate,
+ .set_tdm_slot = cs53l30_set_dai_tdm_slot,
+ .mute_stream = cs53l30_mute_stream,
+};
+
+static struct snd_soc_dai_driver cs53l30_dai = {
+ .name = "cs53l30",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 4,
+ .rates = CS53L30_RATES,
+ .formats = CS53L30_FORMATS,
+ },
+ .ops = &cs53l30_ops,
+ .symmetric_rates = 1,
+};
+
+static int cs53l30_codec_probe(struct snd_soc_codec *codec)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
+ if (priv->use_sdout2)
+ snd_soc_dapm_add_routes(dapm, cs53l30_dapm_routes_sdout2,
+ ARRAY_SIZE(cs53l30_dapm_routes_sdout2));
+ else
+ snd_soc_dapm_add_routes(dapm, cs53l30_dapm_routes_sdout1,
+ ARRAY_SIZE(cs53l30_dapm_routes_sdout1));
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver cs53l30_driver = {
+ .probe = cs53l30_codec_probe,
+ .set_bias_level = cs53l30_set_bias_level,
+ .idle_bias_off = true,
+
+ .dapm_widgets = cs53l30_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs53l30_dapm_widgets),
+ .dapm_routes = cs53l30_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(cs53l30_dapm_routes),
+
+ .controls = cs53l30_snd_controls,
+ .num_controls = ARRAY_SIZE(cs53l30_snd_controls),
+};
+
+static const struct regmap_config cs53l30_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = CS53L30_MAX_REGISTER,
+ .reg_defaults = cs53l30_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(cs53l30_reg_defaults),
+ .volatile_reg = cs53l30_volatile_register,
+ .writeable_reg = cs53l30_writeable_register,
+ .readable_reg = cs53l30_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int cs53l30_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct device_node *np = client->dev.of_node;
+ struct device *dev = &client->dev;
+ struct cs53l30_private *cs53l30;
+ unsigned int devid = 0;
+ unsigned int reg;
+ int ret = 0, i;
+ u8 val;
+
+ cs53l30 = devm_kzalloc(dev, sizeof(*cs53l30), GFP_KERNEL);
+ if (!cs53l30)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(cs53l30->supplies); i++)
+ cs53l30->supplies[i].supply = cs53l30_supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(cs53l30->supplies),
+ cs53l30->supplies);
+ if (ret) {
+ dev_err(dev, "failed to get supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(cs53l30->supplies),
+ cs53l30->supplies);
+ if (ret) {
+ dev_err(dev, "failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ /* Reset the Device */
+ cs53l30->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(cs53l30->reset_gpio)) {
+ ret = PTR_ERR(cs53l30->reset_gpio);
+ goto error;
+ }
+
+ if (cs53l30->reset_gpio)
+ gpiod_set_value_cansleep(cs53l30->reset_gpio, 1);
+
+ i2c_set_clientdata(client, cs53l30);
+
+ cs53l30->mclk_rate = 0;
+
+ cs53l30->regmap = devm_regmap_init_i2c(client, &cs53l30_regmap);
+ if (IS_ERR(cs53l30->regmap)) {
+ ret = PTR_ERR(cs53l30->regmap);
+ dev_err(dev, "regmap_init() failed: %d\n", ret);
+ goto error;
+ }
+
+ /* Initialize codec */
+ ret = regmap_read(cs53l30->regmap, CS53L30_DEVID_AB, &reg);
+ devid = reg << 12;
+
+ ret = regmap_read(cs53l30->regmap, CS53L30_DEVID_CD, &reg);
+ devid |= reg << 4;
+
+ ret = regmap_read(cs53l30->regmap, CS53L30_DEVID_E, &reg);
+ devid |= (reg & 0xF0) >> 4;
+
+ if (devid != CS53L30_DEVID) {
+ ret = -ENODEV;
+ dev_err(dev, "Device ID (%X). Expected %X\n",
+ devid, CS53L30_DEVID);
+ goto error;
+ }
+
+ ret = regmap_read(cs53l30->regmap, CS53L30_REVID, &reg);
+ if (ret < 0) {
+ dev_err(dev, "failed to get Revision ID: %d\n", ret);
+ goto error;
+ }
+
+ /* Check if MCLK provided */
+ cs53l30->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(cs53l30->mclk)) {
+ if (PTR_ERR(cs53l30->mclk) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto error;
+ }
+ /* Otherwise set the mclk pointer to NULL */
+ cs53l30->mclk = NULL;
+ }
+
+ /* Fetch the MUTE control */
+ cs53l30->mute_gpio = devm_gpiod_get_optional(dev, "mute",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(cs53l30->mute_gpio)) {
+ ret = PTR_ERR(cs53l30->mute_gpio);
+ goto error;
+ }
+
+ if (cs53l30->mute_gpio) {
+ /* Enable MUTE controls via MUTE pin */
+ regmap_write(cs53l30->regmap, CS53L30_MUTEP_CTL1,
+ CS53L30_MUTEP_CTL1_MUTEALL);
+ /* Flip the polarity of MUTE pin */
+ if (gpiod_is_active_low(cs53l30->mute_gpio))
+ regmap_update_bits(cs53l30->regmap, CS53L30_MUTEP_CTL2,
+ CS53L30_MUTE_PIN_POLARITY, 0);
+ }
+
+ if (!of_property_read_u8(np, "cirrus,micbias-lvl", &val))
+ regmap_update_bits(cs53l30->regmap, CS53L30_MICBIAS_CTL,
+ CS53L30_MIC_BIAS_CTRL_MASK, val);
+
+ if (of_property_read_bool(np, "cirrus,use-sdout2"))
+ cs53l30->use_sdout2 = true;
+
+ dev_info(dev, "Cirrus Logic CS53L30, Revision: %02X\n", reg & 0xFF);
+
+ ret = snd_soc_register_codec(dev, &cs53l30_driver, &cs53l30_dai, 1);
+ if (ret) {
+ dev_err(dev, "failed to register codec: %d\n", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ regulator_bulk_disable(ARRAY_SIZE(cs53l30->supplies),
+ cs53l30->supplies);
+ return ret;
+}
+
+static int cs53l30_i2c_remove(struct i2c_client *client)
+{
+ struct cs53l30_private *cs53l30 = i2c_get_clientdata(client);
+
+ snd_soc_unregister_codec(&client->dev);
+
+ /* Hold down reset */
+ if (cs53l30->reset_gpio)
+ gpiod_set_value_cansleep(cs53l30->reset_gpio, 0);
+
+ regulator_bulk_disable(ARRAY_SIZE(cs53l30->supplies),
+ cs53l30->supplies);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int cs53l30_runtime_suspend(struct device *dev)
+{
+ struct cs53l30_private *cs53l30 = dev_get_drvdata(dev);
+
+ regcache_cache_only(cs53l30->regmap, true);
+
+ /* Hold down reset */
+ if (cs53l30->reset_gpio)
+ gpiod_set_value_cansleep(cs53l30->reset_gpio, 0);
+
+ regulator_bulk_disable(ARRAY_SIZE(cs53l30->supplies),
+ cs53l30->supplies);
+
+ return 0;
+}
+
+static int cs53l30_runtime_resume(struct device *dev)
+{
+ struct cs53l30_private *cs53l30 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(cs53l30->supplies),
+ cs53l30->supplies);
+ if (ret) {
+ dev_err(dev, "failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ if (cs53l30->reset_gpio)
+ gpiod_set_value_cansleep(cs53l30->reset_gpio, 1);
+
+ regcache_cache_only(cs53l30->regmap, false);
+ ret = regcache_sync(cs53l30->regmap);
+ if (ret) {
+ dev_err(dev, "failed to synchronize regcache: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops cs53l30_runtime_pm = {
+ SET_RUNTIME_PM_OPS(cs53l30_runtime_suspend, cs53l30_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id cs53l30_of_match[] = {
+ { .compatible = "cirrus,cs53l30", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, cs53l30_of_match);
+
+static const struct i2c_device_id cs53l30_id[] = {
+ { "cs53l30", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, cs53l30_id);
+
+static struct i2c_driver cs53l30_i2c_driver = {
+ .driver = {
+ .name = "cs53l30",
+ .pm = &cs53l30_runtime_pm,
+ },
+ .id_table = cs53l30_id,
+ .probe = cs53l30_i2c_probe,
+ .remove = cs53l30_i2c_remove,
+};
+
+module_i2c_driver(cs53l30_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC CS53L30 driver");
+MODULE_AUTHOR("Paul Handrigan, Cirrus Logic Inc, <Paul.Handrigan@cirrus.com>");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * ALSA SoC CS53L30 codec driver
+ *
+ * Copyright 2015 Cirrus Logic, Inc.
+ *
+ * Author: Paul Handrigan <Paul.Handrigan@cirrus.com>,
+ * Tim Howe <Tim.Howe@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __CS53L30_H__
+#define __CS53L30_H__
+
+/* I2C Registers */
+#define CS53L30_DEVID_AB 0x01 /* Device ID A & B [RO]. */
+#define CS53L30_DEVID_CD 0x02 /* Device ID C & D [RO]. */
+#define CS53L30_DEVID_E 0x03 /* Device ID E [RO]. */
+#define CS53L30_REVID 0x05 /* Revision ID [RO]. */
+#define CS53L30_PWRCTL 0x06 /* Power Control. */
+#define CS53L30_MCLKCTL 0x07 /* MCLK Control. */
+#define CS53L30_INT_SR_CTL 0x08 /* Internal Sample Rate Control. */
+#define CS53L30_MICBIAS_CTL 0x0A /* Mic Bias Control. */
+#define CS53L30_ASPCFG_CTL 0x0C /* ASP Config Control. */
+#define CS53L30_ASP_CTL1 0x0D /* ASP1 Control. */
+#define CS53L30_ASP_TDMTX_CTL1 0x0E /* ASP1 TDM TX Control 1 */
+#define CS53L30_ASP_TDMTX_CTL2 0x0F /* ASP1 TDM TX Control 2 */
+#define CS53L30_ASP_TDMTX_CTL3 0x10 /* ASP1 TDM TX Control 3 */
+#define CS53L30_ASP_TDMTX_CTL4 0x11 /* ASP1 TDM TX Control 4 */
+#define CS53L30_ASP_TDMTX_EN1 0x12 /* ASP1 TDM TX Enable 1 */
+#define CS53L30_ASP_TDMTX_EN2 0x13 /* ASP1 TDM TX Enable 2 */
+#define CS53L30_ASP_TDMTX_EN3 0x14 /* ASP1 TDM TX Enable 3 */
+#define CS53L30_ASP_TDMTX_EN4 0x15 /* ASP1 TDM TX Enable 4 */
+#define CS53L30_ASP_TDMTX_EN5 0x16 /* ASP1 TDM TX Enable 5 */
+#define CS53L30_ASP_TDMTX_EN6 0x17 /* ASP1 TDM TX Enable 6 */
+#define CS53L30_ASP_CTL2 0x18 /* ASP2 Control. */
+#define CS53L30_SFT_RAMP 0x1A /* Soft Ramp Control. */
+#define CS53L30_LRCK_CTL1 0x1B /* LRCK Control 1. */
+#define CS53L30_LRCK_CTL2 0x1C /* LRCK Control 2. */
+#define CS53L30_MUTEP_CTL1 0x1F /* Mute Pin Control 1. */
+#define CS53L30_MUTEP_CTL2 0x20 /* Mute Pin Control 2. */
+#define CS53L30_INBIAS_CTL1 0x21 /* Input Bias Control 1. */
+#define CS53L30_INBIAS_CTL2 0x22 /* Input Bias Control 2. */
+#define CS53L30_DMIC1_STR_CTL 0x23 /* DMIC1 Stereo Control. */
+#define CS53L30_DMIC2_STR_CTL 0x24 /* DMIC2 Stereo Control. */
+#define CS53L30_ADCDMIC1_CTL1 0x25 /* ADC1/DMIC1 Control 1. */
+#define CS53L30_ADCDMIC1_CTL2 0x26 /* ADC1/DMIC1 Control 2. */
+#define CS53L30_ADC1_CTL3 0x27 /* ADC1 Control 3. */
+#define CS53L30_ADC1_NG_CTL 0x28 /* ADC1 Noise Gate Control. */
+#define CS53L30_ADC1A_AFE_CTL 0x29 /* ADC1A AFE Control. */
+#define CS53L30_ADC1B_AFE_CTL 0x2A /* ADC1B AFE Control. */
+#define CS53L30_ADC1A_DIG_VOL 0x2B /* ADC1A Digital Volume. */
+#define CS53L30_ADC1B_DIG_VOL 0x2C /* ADC1B Digital Volume. */
+#define CS53L30_ADCDMIC2_CTL1 0x2D /* ADC2/DMIC2 Control 1. */
+#define CS53L30_ADCDMIC2_CTL2 0x2E /* ADC2/DMIC2 Control 2. */
+#define CS53L30_ADC2_CTL3 0x2F /* ADC2 Control 3. */
+#define CS53L30_ADC2_NG_CTL 0x30 /* ADC2 Noise Gate Control. */
+#define CS53L30_ADC2A_AFE_CTL 0x31 /* ADC2A AFE Control. */
+#define CS53L30_ADC2B_AFE_CTL 0x32 /* ADC2B AFE Control. */
+#define CS53L30_ADC2A_DIG_VOL 0x33 /* ADC2A Digital Volume. */
+#define CS53L30_ADC2B_DIG_VOL 0x34 /* ADC2B Digital Volume. */
+#define CS53L30_INT_MASK 0x35 /* Interrupt Mask. */
+#define CS53L30_IS 0x36 /* Interrupt Status. */
+#define CS53L30_MAX_REGISTER 0x36
+
+#define CS53L30_TDM_SLOT_MAX 4
+#define CS53L30_ASP_TDMTX_CTL(x) (CS53L30_ASP_TDMTX_CTL1 + (x))
+/* x: register index; n: slot index; 8 slots per register */
+#define CS53L30_ASP_TDMTX_ENx(x) (CS53L30_ASP_TDMTX_EN6 - (x))
+#define CS53L30_ASP_TDMTX_ENn(n) CS53L30_ASP_TDMTX_ENx((n) >> 3)
+#define CS53L30_ASP_TDMTX_ENx_MAX 6
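+/* Example: slot 10 maps to CS53L30_ASP_TDMTX_ENn(10) == CS53L30_ASP_TDMTX_EN5, bit 2 (10 & 7) */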
+
+/* Device ID */
+#define CS53L30_DEVID 0x53A30
+
+/*
+ * PDN_DONE poll maximum:
+ * if soft ramp is set, powering down the system takes much longer.
+ */
+#define CS53L30_PDN_POLL_MAX 90
+
+/* Bitfield Definitions */
+
+/* R6 (0x06) CS53L30_PWRCTL - Power Control */
+#define CS53L30_PDN_ULP_SHIFT 7
+#define CS53L30_PDN_ULP_MASK (1 << CS53L30_PDN_ULP_SHIFT)
+#define CS53L30_PDN_ULP (1 << CS53L30_PDN_ULP_SHIFT)
+#define CS53L30_PDN_LP_SHIFT 6
+#define CS53L30_PDN_LP_MASK (1 << CS53L30_PDN_LP_SHIFT)
+#define CS53L30_PDN_LP (1 << CS53L30_PDN_LP_SHIFT)
+#define CS53L30_DISCHARGE_FILT_SHIFT 5
+#define CS53L30_DISCHARGE_FILT_MASK (1 << CS53L30_DISCHARGE_FILT_SHIFT)
+#define CS53L30_DISCHARGE_FILT (1 << CS53L30_DISCHARGE_FILT_SHIFT)
+#define CS53L30_THMS_PDN_SHIFT 4
+#define CS53L30_THMS_PDN_MASK (1 << CS53L30_THMS_PDN_SHIFT)
+#define CS53L30_THMS_PDN (1 << CS53L30_THMS_PDN_SHIFT)
+
+#define CS53L30_PWRCTL_DEFAULT (CS53L30_THMS_PDN)
+
+/* R7 (0x07) CS53L30_MCLKCTL - MCLK Control */
+#define CS53L30_MCLK_DIS_SHIFT 7
+#define CS53L30_MCLK_DIS_MASK (1 << CS53L30_MCLK_DIS_SHIFT)
+#define CS53L30_MCLK_DIS (1 << CS53L30_MCLK_DIS_SHIFT)
+#define CS53L30_MCLK_INT_SCALE_SHIFT 6
+#define CS53L30_MCLK_INT_SCALE_MASK (1 << CS53L30_MCLK_INT_SCALE_SHIFT)
+#define CS53L30_MCLK_INT_SCALE (1 << CS53L30_MCLK_INT_SCALE_SHIFT)
+#define CS53L30_DMIC_DRIVE_SHIFT 5
+#define CS53L30_DMIC_DRIVE_MASK (1 << CS53L30_DMIC_DRIVE_SHIFT)
+#define CS53L30_DMIC_DRIVE (1 << CS53L30_DMIC_DRIVE_SHIFT)
+#define CS53L30_MCLK_DIV_SHIFT 2
+#define CS53L30_MCLK_DIV_WIDTH 2
+#define CS53L30_MCLK_DIV_MASK (((1 << CS53L30_MCLK_DIV_WIDTH) - 1) << CS53L30_MCLK_DIV_SHIFT)
+#define CS53L30_MCLK_DIV_BY_1 (0x0 << CS53L30_MCLK_DIV_SHIFT)
+#define CS53L30_MCLK_DIV_BY_2 (0x1 << CS53L30_MCLK_DIV_SHIFT)
+#define CS53L30_MCLK_DIV_BY_3 (0x2 << CS53L30_MCLK_DIV_SHIFT)
+#define CS53L30_SYNC_EN_SHIFT 1
+#define CS53L30_SYNC_EN_MASK (1 << CS53L30_SYNC_EN_SHIFT)
+#define CS53L30_SYNC_EN (1 << CS53L30_SYNC_EN_SHIFT)
+
+#define CS53L30_MCLKCTL_DEFAULT (CS53L30_MCLK_DIV_BY_2)
+
+/* R8 (0x08) CS53L30_INT_SR_CTL - Internal Sample Rate Control */
+#define CS53L30_INTRNL_FS_RATIO_SHIFT 4
+#define CS53L30_INTRNL_FS_RATIO_MASK (1 << CS53L30_INTRNL_FS_RATIO_SHIFT)
+#define CS53L30_INTRNL_FS_RATIO (1 << CS53L30_INTRNL_FS_RATIO_SHIFT)
+#define CS53L30_MCLK_19MHZ_EN_SHIFT 0
+#define CS53L30_MCLK_19MHZ_EN_MASK (1 << CS53L30_MCLK_19MHZ_EN_SHIFT)
+#define CS53L30_MCLK_19MHZ_EN (1 << CS53L30_MCLK_19MHZ_EN_SHIFT)
+
+/* the bits in 0x6 << 1 are reserved */
+#define CS53L30_INT_SR_CTL_DEFAULT (CS53L30_INTRNL_FS_RATIO | 0x6 << 1)
+
+/* R10 (0x0A) CS53L30_MICBIAS_CTL - Mic Bias Control */
+#define CS53L30_MIC4_BIAS_PDN_SHIFT 7
+#define CS53L30_MIC4_BIAS_PDN_MASK (1 << CS53L30_MIC4_BIAS_PDN_SHIFT)
+#define CS53L30_MIC4_BIAS_PDN (1 << CS53L30_MIC4_BIAS_PDN_SHIFT)
+#define CS53L30_MIC3_BIAS_PDN_SHIFT 6
+#define CS53L30_MIC3_BIAS_PDN_MASK (1 << CS53L30_MIC3_BIAS_PDN_SHIFT)
+#define CS53L30_MIC3_BIAS_PDN (1 << CS53L30_MIC3_BIAS_PDN_SHIFT)
+#define CS53L30_MIC2_BIAS_PDN_SHIFT 5
+#define CS53L30_MIC2_BIAS_PDN_MASK (1 << CS53L30_MIC2_BIAS_PDN_SHIFT)
+#define CS53L30_MIC2_BIAS_PDN (1 << CS53L30_MIC2_BIAS_PDN_SHIFT)
+#define CS53L30_MIC1_BIAS_PDN_SHIFT 4
+#define CS53L30_MIC1_BIAS_PDN_MASK (1 << CS53L30_MIC1_BIAS_PDN_SHIFT)
+#define CS53L30_MIC1_BIAS_PDN (1 << CS53L30_MIC1_BIAS_PDN_SHIFT)
+#define CS53L30_MICx_BIAS_PDN (0xf << CS53L30_MIC1_BIAS_PDN_SHIFT)
+#define CS53L30_VP_MIN_SHIFT 2
+#define CS53L30_VP_MIN_MASK (1 << CS53L30_VP_MIN_SHIFT)
+#define CS53L30_VP_MIN (1 << CS53L30_VP_MIN_SHIFT)
+#define CS53L30_MIC_BIAS_CTRL_SHIFT 0
+#define CS53L30_MIC_BIAS_CTRL_WIDTH 2
+#define CS53L30_MIC_BIAS_CTRL_MASK (((1 << CS53L30_MIC_BIAS_CTRL_WIDTH) - 1) << CS53L30_MIC_BIAS_CTRL_SHIFT)
+#define CS53L30_MIC_BIAS_CTRL_HIZ (0 << CS53L30_MIC_BIAS_CTRL_SHIFT)
+#define CS53L30_MIC_BIAS_CTRL_1V8 (1 << CS53L30_MIC_BIAS_CTRL_SHIFT)
+#define CS53L30_MIC_BIAS_CTRL_2V75 (2 << CS53L30_MIC_BIAS_CTRL_SHIFT)
+
+#define CS53L30_MICBIAS_CTL_DEFAULT (CS53L30_MICx_BIAS_PDN | CS53L30_VP_MIN)
+
+/* R12 (0x0C) CS53L30_ASPCFG_CTL - ASP Configuration Control */
+#define CS53L30_ASP_MS_SHIFT 7
+#define CS53L30_ASP_MS_MASK (1 << CS53L30_ASP_MS_SHIFT)
+#define CS53L30_ASP_MS (1 << CS53L30_ASP_MS_SHIFT)
+#define CS53L30_ASP_SCLK_INV_SHIFT 4
+#define CS53L30_ASP_SCLK_INV_MASK (1 << CS53L30_ASP_SCLK_INV_SHIFT)
+#define CS53L30_ASP_SCLK_INV (1 << CS53L30_ASP_SCLK_INV_SHIFT)
+#define CS53L30_ASP_RATE_SHIFT 0
+#define CS53L30_ASP_RATE_WIDTH 4
+#define CS53L30_ASP_RATE_MASK (((1 << CS53L30_ASP_RATE_WIDTH) - 1) << CS53L30_ASP_RATE_SHIFT)
+#define CS53L30_ASP_RATE_48K (0xc << CS53L30_ASP_RATE_SHIFT)
+
+#define CS53L30_ASPCFG_CTL_DEFAULT (CS53L30_ASP_RATE_48K)
+
+/* R13/R24 (0x0D/0x18) CS53L30_ASP_CTL1 & CS53L30_ASP_CTL2 - ASP Control 1~2 */
+#define CS53L30_ASP_TDM_PDN_SHIFT 7
+#define CS53L30_ASP_TDM_PDN_MASK (1 << CS53L30_ASP_TDM_PDN_SHIFT)
+#define CS53L30_ASP_TDM_PDN (1 << CS53L30_ASP_TDM_PDN_SHIFT)
+#define CS53L30_ASP_SDOUTx_PDN_SHIFT 6
+#define CS53L30_ASP_SDOUTx_PDN_MASK (1 << CS53L30_ASP_SDOUTx_PDN_SHIFT)
+#define CS53L30_ASP_SDOUTx_PDN (1 << CS53L30_ASP_SDOUTx_PDN_SHIFT)
+#define CS53L30_ASP_3ST_SHIFT 5
+#define CS53L30_ASP_3ST_MASK (1 << CS53L30_ASP_3ST_SHIFT)
+#define CS53L30_ASP_3ST (1 << CS53L30_ASP_3ST_SHIFT)
+#define CS53L30_SHIFT_LEFT_SHIFT 4
+#define CS53L30_SHIFT_LEFT_MASK (1 << CS53L30_SHIFT_LEFT_SHIFT)
+#define CS53L30_SHIFT_LEFT (1 << CS53L30_SHIFT_LEFT_SHIFT)
+#define CS53L30_ASP_SDOUTx_DRIVE_SHIFT 0
+#define CS53L30_ASP_SDOUTx_DRIVE_MASK (1 << CS53L30_ASP_SDOUTx_DRIVE_SHIFT)
+#define CS53L30_ASP_SDOUTx_DRIVE (1 << CS53L30_ASP_SDOUTx_DRIVE_SHIFT)
+
+#define CS53L30_ASP_CTL1_DEFAULT (CS53L30_ASP_TDM_PDN)
+#define CS53L30_ASP_CTL2_DEFAULT (0)
+
+/* R14 (0x0E) ~ R17 (0x11) CS53L30_ASP_TDMTX_CTLx - ASP TDM TX Control 1~4 */
+#define CS53L30_ASP_CHx_TX_STATE_SHIFT 7
+#define CS53L30_ASP_CHx_TX_STATE_MASK (1 << CS53L30_ASP_CHx_TX_STATE_SHIFT)
+#define CS53L30_ASP_CHx_TX_STATE (1 << CS53L30_ASP_CHx_TX_STATE_SHIFT)
+#define CS53L30_ASP_CHx_TX_LOC_SHIFT 0
+#define CS53L30_ASP_CHx_TX_LOC_WIDTH 6
+#define CS53L30_ASP_CHx_TX_LOC_MASK (((1 << CS53L30_ASP_CHx_TX_LOC_WIDTH) - 1) << CS53L30_ASP_CHx_TX_LOC_SHIFT)
+#define CS53L30_ASP_CHx_TX_LOC_MAX (47 << CS53L30_ASP_CHx_TX_LOC_SHIFT)
+#define CS53L30_ASP_CHx_TX_LOC(x) ((x) << CS53L30_ASP_CHx_TX_LOC_SHIFT)
+
+#define CS53L30_ASP_TDMTX_CTLx_DEFAULT (CS53L30_ASP_CHx_TX_LOC_MAX)
+
+/* R18 (0x12) ~ R23 (0x17) CS53L30_ASP_TDMTX_ENx - ASP TDM TX Enable 1~6 */
+#define CS53L30_ASP_TDMTX_ENx_DEFAULT (0)
+
+/* R26 (0x1A) CS53L30_SFT_RAMP - Soft Ramp Control */
+#define CS53L30_DIGSFT_SHIFT 5
+#define CS53L30_DIGSFT_MASK (1 << CS53L30_DIGSFT_SHIFT)
+#define CS53L30_DIGSFT (1 << CS53L30_DIGSFT_SHIFT)
+
+#define CS53L30_SFT_RMP_DEFAULT (0)
+
+/* R28 (0x1C) CS53L30_LRCK_CTL2 - LRCK Control 2 */
+#define CS53L30_LRCK_50_NPW_SHIFT 3
+#define CS53L30_LRCK_50_NPW_MASK (1 << CS53L30_LRCK_50_NPW_SHIFT)
+#define CS53L30_LRCK_50_NPW (1 << CS53L30_LRCK_50_NPW_SHIFT)
+#define CS53L30_LRCK_TPWH_SHIFT 0
+#define CS53L30_LRCK_TPWH_WIDTH 3
+#define CS53L30_LRCK_TPWH_MASK (((1 << CS53L30_LRCK_TPWH_WIDTH) - 1) << CS53L30_LRCK_TPWH_SHIFT)
+#define CS53L30_LRCK_TPWH(x) (((x) << CS53L30_LRCK_TPWH_SHIFT) & CS53L30_LRCK_TPWH_MASK)
+
+#define CS53L30_LRCK_CTLx_DEFAULT (0)
+
+/* R31 (0x1F) CS53L30_MUTEP_CTL1 - MUTE Pin Control 1 */
+#define CS53L30_MUTE_PDN_ULP_SHIFT 7
+#define CS53L30_MUTE_PDN_ULP_MASK (1 << CS53L30_MUTE_PDN_ULP_SHIFT)
+#define CS53L30_MUTE_PDN_ULP (1 << CS53L30_MUTE_PDN_ULP_SHIFT)
+#define CS53L30_MUTE_PDN_LP_SHIFT 6
+#define CS53L30_MUTE_PDN_LP_MASK (1 << CS53L30_MUTE_PDN_LP_SHIFT)
+#define CS53L30_MUTE_PDN_LP (1 << CS53L30_MUTE_PDN_LP_SHIFT)
+#define CS53L30_MUTE_M4B_PDN_SHIFT 4
+#define CS53L30_MUTE_M4B_PDN_MASK (1 << CS53L30_MUTE_M4B_PDN_SHIFT)
+#define CS53L30_MUTE_M4B_PDN (1 << CS53L30_MUTE_M4B_PDN_SHIFT)
+#define CS53L30_MUTE_M3B_PDN_SHIFT 3
+#define CS53L30_MUTE_M3B_PDN_MASK (1 << CS53L30_MUTE_M3B_PDN_SHIFT)
+#define CS53L30_MUTE_M3B_PDN (1 << CS53L30_MUTE_M3B_PDN_SHIFT)
+#define CS53L30_MUTE_M2B_PDN_SHIFT 2
+#define CS53L30_MUTE_M2B_PDN_MASK (1 << CS53L30_MUTE_M2B_PDN_SHIFT)
+#define CS53L30_MUTE_M2B_PDN (1 << CS53L30_MUTE_M2B_PDN_SHIFT)
+#define CS53L30_MUTE_M1B_PDN_SHIFT 1
+#define CS53L30_MUTE_M1B_PDN_MASK (1 << CS53L30_MUTE_M1B_PDN_SHIFT)
+#define CS53L30_MUTE_M1B_PDN (1 << CS53L30_MUTE_M1B_PDN_SHIFT)
+/* Note: be careful - x starts from 0 */
+#define CS53L30_MUTE_MxB_PDN_SHIFT(x) (CS53L30_MUTE_M1B_PDN_SHIFT + (x))
+#define CS53L30_MUTE_MxB_PDN_MASK(x) (1 << CS53L30_MUTE_MxB_PDN_SHIFT(x))
+#define CS53L30_MUTE_MxB_PDN(x) (1 << CS53L30_MUTE_MxB_PDN_SHIFT(x))
+#define CS53L30_MUTE_MB_ALL_PDN_SHIFT 0
+#define CS53L30_MUTE_MB_ALL_PDN_MASK (1 << CS53L30_MUTE_MB_ALL_PDN_SHIFT)
+#define CS53L30_MUTE_MB_ALL_PDN (1 << CS53L30_MUTE_MB_ALL_PDN_SHIFT)
+
+#define CS53L30_MUTEP_CTL1_MUTEALL (0xdf)
+#define CS53L30_MUTEP_CTL1_DEFAULT (0)
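+/* MUTEALL (0xdf) sets every power-down control defined above; bit 5 is unused */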
+
+/* R32 (0x20) CS53L30_MUTEP_CTL2 - MUTE Pin Control 2 */
+#define CS53L30_MUTE_PIN_POLARITY_SHIFT 7
+#define CS53L30_MUTE_PIN_POLARITY_MASK (1 << CS53L30_MUTE_PIN_POLARITY_SHIFT)
+#define CS53L30_MUTE_PIN_POLARITY (1 << CS53L30_MUTE_PIN_POLARITY_SHIFT)
+#define CS53L30_MUTE_ASP_TDM_PDN_SHIFT 6
+#define CS53L30_MUTE_ASP_TDM_PDN_MASK (1 << CS53L30_MUTE_ASP_TDM_PDN_SHIFT)
+#define CS53L30_MUTE_ASP_TDM_PDN (1 << CS53L30_MUTE_ASP_TDM_PDN_SHIFT)
+#define CS53L30_MUTE_ASP_SDOUT2_PDN_SHIFT 5
+#define CS53L30_MUTE_ASP_SDOUT2_PDN_MASK (1 << CS53L30_MUTE_ASP_SDOUT2_PDN_SHIFT)
+#define CS53L30_MUTE_ASP_SDOUT2_PDN (1 << CS53L30_MUTE_ASP_SDOUT2_PDN_SHIFT)
+#define CS53L30_MUTE_ASP_SDOUT1_PDN_SHIFT 4
+#define CS53L30_MUTE_ASP_SDOUT1_PDN_MASK (1 << CS53L30_MUTE_ASP_SDOUT1_PDN_SHIFT)
+#define CS53L30_MUTE_ASP_SDOUT1_PDN (1 << CS53L30_MUTE_ASP_SDOUT1_PDN_SHIFT)
+/* Note: be careful - x starts from 0 */
+#define CS53L30_MUTE_ASP_SDOUTx_PDN_SHIFT(x) ((x) + CS53L30_MUTE_ASP_SDOUT1_PDN_SHIFT)
+#define CS53L30_MUTE_ASP_SDOUTx_PDN_MASK(x) (1 << CS53L30_MUTE_ASP_SDOUTx_PDN_SHIFT(x))
+#define CS53L30_MUTE_ASP_SDOUTx_PDN(x) (1 << CS53L30_MUTE_ASP_SDOUTx_PDN_SHIFT(x))
+#define CS53L30_MUTE_ADC2B_PDN_SHIFT 3
+#define CS53L30_MUTE_ADC2B_PDN_MASK (1 << CS53L30_MUTE_ADC2B_PDN_SHIFT)
+#define CS53L30_MUTE_ADC2B_PDN (1 << CS53L30_MUTE_ADC2B_PDN_SHIFT)
+#define CS53L30_MUTE_ADC2A_PDN_SHIFT 2
+#define CS53L30_MUTE_ADC2A_PDN_MASK (1 << CS53L30_MUTE_ADC2A_PDN_SHIFT)
+#define CS53L30_MUTE_ADC2A_PDN (1 << CS53L30_MUTE_ADC2A_PDN_SHIFT)
+#define CS53L30_MUTE_ADC1B_PDN_SHIFT 1
+#define CS53L30_MUTE_ADC1B_PDN_MASK (1 << CS53L30_MUTE_ADC1B_PDN_SHIFT)
+#define CS53L30_MUTE_ADC1B_PDN (1 << CS53L30_MUTE_ADC1B_PDN_SHIFT)
+#define CS53L30_MUTE_ADC1A_PDN_SHIFT 0
+#define CS53L30_MUTE_ADC1A_PDN_MASK (1 << CS53L30_MUTE_ADC1A_PDN_SHIFT)
+#define CS53L30_MUTE_ADC1A_PDN (1 << CS53L30_MUTE_ADC1A_PDN_SHIFT)
+
+#define CS53L30_MUTEP_CTL2_DEFAULT (CS53L30_MUTE_PIN_POLARITY)
+
+/* R33 (0x21) CS53L30_INBIAS_CTL1 - Input Bias Control 1 */
+#define CS53L30_IN4M_BIAS_SHIFT 6
+#define CS53L30_IN4M_BIAS_WIDTH 2
+#define CS53L30_IN4M_BIAS_MASK (((1 << CS53L30_IN4M_BIAS_WIDTH) - 1) << CS53L30_IN4M_BIAS_SHIFT)
+#define CS53L30_IN4M_BIAS_OPEN (0 << CS53L30_IN4M_BIAS_SHIFT)
+#define CS53L30_IN4M_BIAS_PULL_DOWN (1 << CS53L30_IN4M_BIAS_SHIFT)
+#define CS53L30_IN4M_BIAS_VCM (2 << CS53L30_IN4M_BIAS_SHIFT)
+#define CS53L30_IN4P_BIAS_SHIFT 4
+#define CS53L30_IN4P_BIAS_WIDTH 2
+#define CS53L30_IN4P_BIAS_MASK (((1 << CS53L30_IN4P_BIAS_WIDTH) - 1) << CS53L30_IN4P_BIAS_SHIFT)
+#define CS53L30_IN4P_BIAS_OPEN (0 << CS53L30_IN4P_BIAS_SHIFT)
+#define CS53L30_IN4P_BIAS_PULL_DOWN (1 << CS53L30_IN4P_BIAS_SHIFT)
+#define CS53L30_IN4P_BIAS_VCM (2 << CS53L30_IN4P_BIAS_SHIFT)
+#define CS53L30_IN3M_BIAS_SHIFT 2
+#define CS53L30_IN3M_BIAS_WIDTH 2
+#define CS53L30_IN3M_BIAS_MASK (((1 << CS53L30_IN3M_BIAS_WIDTH) - 1) << CS53L30_IN3M_BIAS_SHIFT)
+#define CS53L30_IN3M_BIAS_OPEN (0 << CS53L30_IN3M_BIAS_SHIFT)
+#define CS53L30_IN3M_BIAS_PULL_DOWN (1 << CS53L30_IN3M_BIAS_SHIFT)
+#define CS53L30_IN3M_BIAS_VCM (2 << CS53L30_IN3M_BIAS_SHIFT)
+#define CS53L30_IN3P_BIAS_SHIFT 0
+#define CS53L30_IN3P_BIAS_WIDTH 2
+#define CS53L30_IN3P_BIAS_MASK (((1 << CS53L30_IN3P_BIAS_WIDTH) - 1) << CS53L30_IN3P_BIAS_SHIFT)
+#define CS53L30_IN3P_BIAS_OPEN (0 << CS53L30_IN3P_BIAS_SHIFT)
+#define CS53L30_IN3P_BIAS_PULL_DOWN (1 << CS53L30_IN3P_BIAS_SHIFT)
+#define CS53L30_IN3P_BIAS_VCM (2 << CS53L30_IN3P_BIAS_SHIFT)
+
+#define CS53L30_INBIAS_CTL1_DEFAULT (CS53L30_IN4M_BIAS_VCM | CS53L30_IN4P_BIAS_VCM |\
+ CS53L30_IN3M_BIAS_VCM | CS53L30_IN3P_BIAS_VCM)
+
+/* R34 (0x22) CS53L30_INBIAS_CTL2 - Input Bias Control 2 */
+#define CS53L30_IN2M_BIAS_SHIFT 6
+#define CS53L30_IN2M_BIAS_WIDTH 2
+#define CS53L30_IN2M_BIAS_MASK (((1 << CS53L30_IN2M_BIAS_WIDTH) - 1) << CS53L30_IN2M_BIAS_SHIFT)
+#define CS53L30_IN2M_BIAS_OPEN (0 << CS53L30_IN2M_BIAS_SHIFT)
+#define CS53L30_IN2M_BIAS_PULL_DOWN (1 << CS53L30_IN2M_BIAS_SHIFT)
+#define CS53L30_IN2M_BIAS_VCM (2 << CS53L30_IN2M_BIAS_SHIFT)
+#define CS53L30_IN2P_BIAS_SHIFT 4
+#define CS53L30_IN2P_BIAS_WIDTH 2
+#define CS53L30_IN2P_BIAS_MASK (((1 << CS53L30_IN2P_BIAS_WIDTH) - 1) << CS53L30_IN2P_BIAS_SHIFT)
+#define CS53L30_IN2P_BIAS_OPEN (0 << CS53L30_IN2P_BIAS_SHIFT)
+#define CS53L30_IN2P_BIAS_PULL_DOWN (1 << CS53L30_IN2P_BIAS_SHIFT)
+#define CS53L30_IN2P_BIAS_VCM (2 << CS53L30_IN2P_BIAS_SHIFT)
+#define CS53L30_IN1M_BIAS_SHIFT 2
+#define CS53L30_IN1M_BIAS_WIDTH 2
+#define CS53L30_IN1M_BIAS_MASK (((1 << CS53L30_IN1M_BIAS_WIDTH) - 1) << CS53L30_IN1M_BIAS_SHIFT)
+#define CS53L30_IN1M_BIAS_OPEN (0 << CS53L30_IN1M_BIAS_SHIFT)
+#define CS53L30_IN1M_BIAS_PULL_DOWN (1 << CS53L30_IN1M_BIAS_SHIFT)
+#define CS53L30_IN1M_BIAS_VCM (2 << CS53L30_IN1M_BIAS_SHIFT)
+#define CS53L30_IN1P_BIAS_SHIFT 0
+#define CS53L30_IN1P_BIAS_WIDTH 2
+#define CS53L30_IN1P_BIAS_MASK (((1 << CS53L30_IN1P_BIAS_WIDTH) - 1) << CS53L30_IN1P_BIAS_SHIFT)
+#define CS53L30_IN1P_BIAS_OPEN (0 << CS53L30_IN1P_BIAS_SHIFT)
+#define CS53L30_IN1P_BIAS_PULL_DOWN (1 << CS53L30_IN1P_BIAS_SHIFT)
+#define CS53L30_IN1P_BIAS_VCM (2 << CS53L30_IN1P_BIAS_SHIFT)
+
+#define CS53L30_INBIAS_CTL2_DEFAULT (CS53L30_IN2M_BIAS_VCM | CS53L30_IN2P_BIAS_VCM |\
+ CS53L30_IN1M_BIAS_VCM | CS53L30_IN1P_BIAS_VCM)
+
+/* R35 (0x23) & R36 (0x24) CS53L30_DMICx_STR_CTL - DMIC1 & DMIC2 Stereo Control */
+#define CS53L30_DMICx_STEREO_ENB_SHIFT 5
+#define CS53L30_DMICx_STEREO_ENB_MASK (1 << CS53L30_DMICx_STEREO_ENB_SHIFT)
+#define CS53L30_DMICx_STEREO_ENB (1 << CS53L30_DMICx_STEREO_ENB_SHIFT)
+
+/* 0x88 and 0xCC are reserved bits */
+#define CS53L30_DMIC1_STR_CTL_DEFAULT (CS53L30_DMICx_STEREO_ENB | 0x88)
+#define CS53L30_DMIC2_STR_CTL_DEFAULT (CS53L30_DMICx_STEREO_ENB | 0xCC)
+
+/* R37/R45 (0x25/0x2D) CS53L30_ADCDMICx_CTL1 - ADC1/DMIC1 & ADC2/DMIC2 Control 1 */
+#define CS53L30_ADCxB_PDN_SHIFT 7
+#define CS53L30_ADCxB_PDN_MASK (1 << CS53L30_ADCxB_PDN_SHIFT)
+#define CS53L30_ADCxB_PDN (1 << CS53L30_ADCxB_PDN_SHIFT)
+#define CS53L30_ADCxA_PDN_SHIFT 6
+#define CS53L30_ADCxA_PDN_MASK (1 << CS53L30_ADCxA_PDN_SHIFT)
+#define CS53L30_ADCxA_PDN (1 << CS53L30_ADCxA_PDN_SHIFT)
+#define CS53L30_DMICx_PDN_SHIFT 2
+#define CS53L30_DMICx_PDN_MASK (1 << CS53L30_DMICx_PDN_SHIFT)
+#define CS53L30_DMICx_PDN (1 << CS53L30_DMICx_PDN_SHIFT)
+#define CS53L30_DMICx_SCLK_DIV_SHIFT 1
+#define CS53L30_DMICx_SCLK_DIV_MASK (1 << CS53L30_DMICx_SCLK_DIV_SHIFT)
+#define CS53L30_DMICx_SCLK_DIV (1 << CS53L30_DMICx_SCLK_DIV_SHIFT)
+#define CS53L30_CH_TYPE_SHIFT 0
+#define CS53L30_CH_TYPE_MASK (1 << CS53L30_CH_TYPE_SHIFT)
+#define CS53L30_CH_TYPE (1 << CS53L30_CH_TYPE_SHIFT)
+
+#define CS53L30_ADCDMICx_PDN_MASK 0xFF
+#define CS53L30_ADCDMICx_CTL1_DEFAULT (CS53L30_DMICx_PDN)
+
+/* R38/R46 (0x26/0x2E) CS53L30_ADCDMICx_CTL2 - ADC1/DMIC1 & ADC2/DMIC2 Control 2 */
+#define CS53L30_ADCx_NOTCH_DIS_SHIFT 7
+#define CS53L30_ADCx_NOTCH_DIS_MASK (1 << CS53L30_ADCx_NOTCH_DIS_SHIFT)
+#define CS53L30_ADCx_NOTCH_DIS (1 << CS53L30_ADCx_NOTCH_DIS_SHIFT)
+#define CS53L30_ADCxB_INV_SHIFT 5
+#define CS53L30_ADCxB_INV_MASK (1 << CS53L30_ADCxB_INV_SHIFT)
+#define CS53L30_ADCxB_INV (1 << CS53L30_ADCxB_INV_SHIFT)
+#define CS53L30_ADCxA_INV_SHIFT 4
+#define CS53L30_ADCxA_INV_MASK (1 << CS53L30_ADCxA_INV_SHIFT)
+#define CS53L30_ADCxA_INV (1 << CS53L30_ADCxA_INV_SHIFT)
+#define CS53L30_ADCxB_DIG_BOOST_SHIFT 1
+#define CS53L30_ADCxB_DIG_BOOST_MASK (1 << CS53L30_ADCxB_DIG_BOOST_SHIFT)
+#define CS53L30_ADCxB_DIG_BOOST (1 << CS53L30_ADCxB_DIG_BOOST_SHIFT)
+#define CS53L30_ADCxA_DIG_BOOST_SHIFT 0
+#define CS53L30_ADCxA_DIG_BOOST_MASK (1 << CS53L30_ADCxA_DIG_BOOST_SHIFT)
+#define CS53L30_ADCxA_DIG_BOOST (1 << CS53L30_ADCxA_DIG_BOOST_SHIFT)
+
+#define CS53L30_ADCDMIC1_CTL2_DEFAULT (0)
+
+/* R39/R47 (0x27/0x2F) CS53L30_ADCx_CTL3 - ADC1/ADC2 Control 3 */
+#define CS53L30_ADCx_HPF_EN_SHIFT 3
+#define CS53L30_ADCx_HPF_EN_MASK (1 << CS53L30_ADCx_HPF_EN_SHIFT)
+#define CS53L30_ADCx_HPF_EN (1 << CS53L30_ADCx_HPF_EN_SHIFT)
+#define CS53L30_ADCx_HPF_CF_SHIFT 1
+#define CS53L30_ADCx_HPF_CF_WIDTH 2
+#define CS53L30_ADCx_HPF_CF_MASK (((1 << CS53L30_ADCx_HPF_CF_WIDTH) - 1) << CS53L30_ADCx_HPF_CF_SHIFT)
+#define CS53L30_ADCx_HPF_CF_1HZ86 (0 << CS53L30_ADCx_HPF_CF_SHIFT)
+#define CS53L30_ADCx_HPF_CF_120HZ (1 << CS53L30_ADCx_HPF_CF_SHIFT)
+#define CS53L30_ADCx_HPF_CF_235HZ (2 << CS53L30_ADCx_HPF_CF_SHIFT)
+#define CS53L30_ADCx_HPF_CF_466HZ (3 << CS53L30_ADCx_HPF_CF_SHIFT)
+#define CS53L30_ADCx_NG_ALL_SHIFT 0
+#define CS53L30_ADCx_NG_ALL_MASK (1 << CS53L30_ADCx_NG_ALL_SHIFT)
+#define CS53L30_ADCx_NG_ALL (1 << CS53L30_ADCx_NG_ALL_SHIFT)
+
+#define CS53L30_ADCx_CTL3_DEFAULT (CS53L30_ADCx_HPF_EN)
+
+/* R40/R48 (0x28/0x30) CS53L30_ADCx_NG_CTL - ADC1/ADC2 Noise Gate Control */
+#define CS53L30_ADCxB_NG_SHIFT 7
+#define CS53L30_ADCxB_NG_MASK (1 << CS53L30_ADCxB_NG_SHIFT)
+#define CS53L30_ADCxB_NG (1 << CS53L30_ADCxB_NG_SHIFT)
+#define CS53L30_ADCxA_NG_SHIFT 6
+#define CS53L30_ADCxA_NG_MASK (1 << CS53L30_ADCxA_NG_SHIFT)
+#define CS53L30_ADCxA_NG (1 << CS53L30_ADCxA_NG_SHIFT)
+#define CS53L30_ADCx_NG_BOOST_SHIFT 5
+#define CS53L30_ADCx_NG_BOOST_MASK (1 << CS53L30_ADCx_NG_BOOST_SHIFT)
+#define CS53L30_ADCx_NG_BOOST (1 << CS53L30_ADCx_NG_BOOST_SHIFT)
+#define CS53L30_ADCx_NG_THRESH_SHIFT 2
+#define CS53L30_ADCx_NG_THRESH_WIDTH 3
+#define CS53L30_ADCx_NG_THRESH_MASK (((1 << CS53L30_ADCx_NG_THRESH_WIDTH) - 1) << CS53L30_ADCx_NG_THRESH_SHIFT)
+#define CS53L30_ADCx_NG_DELAY_SHIFT 0
+#define CS53L30_ADCx_NG_DELAY_WIDTH 2
+#define CS53L30_ADCx_NG_DELAY_MASK (((1 << CS53L30_ADCx_NG_DELAY_WIDTH) - 1) << CS53L30_ADCx_NG_DELAY_SHIFT)
+
+#define CS53L30_ADCx_NG_CTL_DEFAULT (0)
+
+/* R41/R42/R49/R50 (0x29/0x2A/0x31/0x32) CS53L30_ADCxy_AFE_CTL - ADC1A/1B/2A/2B AFE Control */
+#define CS53L30_ADCxy_PREAMP_SHIFT 6
+#define CS53L30_ADCxy_PREAMP_WIDTH 2
+#define CS53L30_ADCxy_PREAMP_MASK (((1 << CS53L30_ADCxy_PREAMP_WIDTH) - 1) << CS53L30_ADCxy_PREAMP_SHIFT)
+#define CS53L30_ADCxy_PGA_VOL_SHIFT 0
+#define CS53L30_ADCxy_PGA_VOL_WIDTH 6
+#define CS53L30_ADCxy_PGA_VOL_MASK (((1 << CS53L30_ADCxy_PGA_VOL_WIDTH) - 1) << CS53L30_ADCxy_PGA_VOL_SHIFT)
+
+#define CS53L30_ADCxy_AFE_CTL_DEFAULT (0)
+
+/* R43/R44/R51/R52 (0x2B/0x2C/0x33/0x34) CS53L30_ADCxy_DIG_VOL - ADC1A/1B/2A/2B Digital Volume */
+#define CS53L30_ADCxy_VOL_MUTE (0x80)
+
+#define CS53L30_ADCxy_DIG_VOL_DEFAULT (0x0)
+
+/* CS53L30_INT */
+#define CS53L30_PDN_DONE (1 << 7)
+#define CS53L30_THMS_TRIP (1 << 6)
+#define CS53L30_SYNC_DONE (1 << 5)
+#define CS53L30_ADC2B_OVFL (1 << 4)
+#define CS53L30_ADC2A_OVFL (1 << 3)
+#define CS53L30_ADC1B_OVFL (1 << 2)
+#define CS53L30_ADC1A_OVFL (1 << 1)
+#define CS53L30_MUTE_PIN (1 << 0)
+#define CS53L30_DEVICE_INT_MASK 0xFF
+
+#endif /* __CS53L30_H__ */
if (!tty->disc_data)
return -ENODEV;
+ tty->receive_room = 16;
if (tty->ops->write(tty, v253_init, len) != len) {
ret = -EIO;
goto err;
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/i2c.h>
+#include <linux/property.h>
#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
#include <linux/delay.h>
}
/*
- * DT to pdata conversion
+ * DT/ACPI to pdata conversion
*/
static enum da7219_aad_micbias_pulse_lvl
- da7219_aad_of_micbias_pulse_lvl(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_micbias_pulse_lvl(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 2800:
}
static enum da7219_aad_btn_cfg
- da7219_aad_of_btn_cfg(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_btn_cfg(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 2:
}
static enum da7219_aad_mic_det_thr
- da7219_aad_of_mic_det_thr(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_mic_det_thr(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 200:
}
static enum da7219_aad_jack_ins_deb
- da7219_aad_of_jack_ins_deb(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_jack_ins_deb(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 5:
}
static enum da7219_aad_jack_det_rate
- da7219_aad_of_jack_det_rate(struct snd_soc_codec *codec, const char *str)
+ da7219_aad_fw_jack_det_rate(struct snd_soc_codec *codec, const char *str)
{
if (!strcmp(str, "32ms_64ms")) {
return DA7219_AAD_JACK_DET_RATE_32_64MS;
}
static enum da7219_aad_jack_rem_deb
- da7219_aad_of_jack_rem_deb(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_jack_rem_deb(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 1:
}
static enum da7219_aad_btn_avg
- da7219_aad_of_btn_avg(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_btn_avg(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 1:
}
static enum da7219_aad_adc_1bit_rpt
- da7219_aad_of_adc_1bit_rpt(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_adc_1bit_rpt(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 1:
}
}
-static struct da7219_aad_pdata *da7219_aad_of_to_pdata(struct snd_soc_codec *codec)
+static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct snd_soc_codec *codec)
{
- struct device_node *np = codec->dev->of_node;
- struct device_node *aad_np = of_find_node_by_name(np, "da7219_aad");
+ struct device *dev = codec->dev;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct fwnode_handle *aad_np;
struct da7219_aad_pdata *aad_pdata;
- const char *of_str;
- u32 of_val32;
+ const char *fw_str;
+ u32 fw_val32;
+ aad_np = device_get_named_child_node(dev, "da7219_aad");
if (!aad_np)
return NULL;
aad_pdata = devm_kzalloc(codec->dev, sizeof(*aad_pdata), GFP_KERNEL);
if (!aad_pdata)
- goto out;
+ return NULL;
- aad_pdata->irq = irq_of_parse_and_map(np, 0);
+ aad_pdata->irq = i2c->irq;
- if (of_property_read_u32(aad_np, "dlg,micbias-pulse-lvl",
- &of_val32) >= 0)
+ if (fwnode_property_read_u32(aad_np, "dlg,micbias-pulse-lvl",
+ &fw_val32) >= 0)
aad_pdata->micbias_pulse_lvl =
- da7219_aad_of_micbias_pulse_lvl(codec, of_val32);
+ da7219_aad_fw_micbias_pulse_lvl(codec, fw_val32);
else
aad_pdata->micbias_pulse_lvl = DA7219_AAD_MICBIAS_PULSE_LVL_OFF;
- if (of_property_read_u32(aad_np, "dlg,micbias-pulse-time",
- &of_val32) >= 0)
- aad_pdata->micbias_pulse_time = of_val32;
+ if (fwnode_property_read_u32(aad_np, "dlg,micbias-pulse-time",
+ &fw_val32) >= 0)
+ aad_pdata->micbias_pulse_time = fw_val32;
- if (of_property_read_u32(aad_np, "dlg,btn-cfg", &of_val32) >= 0)
- aad_pdata->btn_cfg = da7219_aad_of_btn_cfg(codec, of_val32);
+ if (fwnode_property_read_u32(aad_np, "dlg,btn-cfg", &fw_val32) >= 0)
+ aad_pdata->btn_cfg = da7219_aad_fw_btn_cfg(codec, fw_val32);
else
aad_pdata->btn_cfg = DA7219_AAD_BTN_CFG_10MS;
- if (of_property_read_u32(aad_np, "dlg,mic-det-thr", &of_val32) >= 0)
+ if (fwnode_property_read_u32(aad_np, "dlg,mic-det-thr", &fw_val32) >= 0)
aad_pdata->mic_det_thr =
- da7219_aad_of_mic_det_thr(codec, of_val32);
+ da7219_aad_fw_mic_det_thr(codec, fw_val32);
else
aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_500_OHMS;
- if (of_property_read_u32(aad_np, "dlg,jack-ins-deb", &of_val32) >= 0)
+ if (fwnode_property_read_u32(aad_np, "dlg,jack-ins-deb", &fw_val32) >= 0)
aad_pdata->jack_ins_deb =
- da7219_aad_of_jack_ins_deb(codec, of_val32);
+ da7219_aad_fw_jack_ins_deb(codec, fw_val32);
else
aad_pdata->jack_ins_deb = DA7219_AAD_JACK_INS_DEB_20MS;
- if (!of_property_read_string(aad_np, "dlg,jack-det-rate", &of_str))
+ if (!fwnode_property_read_string(aad_np, "dlg,jack-det-rate", &fw_str))
aad_pdata->jack_det_rate =
- da7219_aad_of_jack_det_rate(codec, of_str);
+ da7219_aad_fw_jack_det_rate(codec, fw_str);
else
aad_pdata->jack_det_rate = DA7219_AAD_JACK_DET_RATE_256_512MS;
- if (of_property_read_u32(aad_np, "dlg,jack-rem-deb", &of_val32) >= 0)
+ if (fwnode_property_read_u32(aad_np, "dlg,jack-rem-deb", &fw_val32) >= 0)
aad_pdata->jack_rem_deb =
- da7219_aad_of_jack_rem_deb(codec, of_val32);
+ da7219_aad_fw_jack_rem_deb(codec, fw_val32);
else
aad_pdata->jack_rem_deb = DA7219_AAD_JACK_REM_DEB_1MS;
- if (of_property_read_u32(aad_np, "dlg,a-d-btn-thr", &of_val32) >= 0)
- aad_pdata->a_d_btn_thr = (u8) of_val32;
+ if (fwnode_property_read_u32(aad_np, "dlg,a-d-btn-thr", &fw_val32) >= 0)
+ aad_pdata->a_d_btn_thr = (u8) fw_val32;
else
aad_pdata->a_d_btn_thr = 0xA;
- if (of_property_read_u32(aad_np, "dlg,d-b-btn-thr", &of_val32) >= 0)
- aad_pdata->d_b_btn_thr = (u8) of_val32;
+ if (fwnode_property_read_u32(aad_np, "dlg,d-b-btn-thr", &fw_val32) >= 0)
+ aad_pdata->d_b_btn_thr = (u8) fw_val32;
else
aad_pdata->d_b_btn_thr = 0x16;
- if (of_property_read_u32(aad_np, "dlg,b-c-btn-thr", &of_val32) >= 0)
- aad_pdata->b_c_btn_thr = (u8) of_val32;
+ if (fwnode_property_read_u32(aad_np, "dlg,b-c-btn-thr", &fw_val32) >= 0)
+ aad_pdata->b_c_btn_thr = (u8) fw_val32;
else
aad_pdata->b_c_btn_thr = 0x21;
- if (of_property_read_u32(aad_np, "dlg,c-mic-btn-thr", &of_val32) >= 0)
- aad_pdata->c_mic_btn_thr = (u8) of_val32;
+ if (fwnode_property_read_u32(aad_np, "dlg,c-mic-btn-thr", &fw_val32) >= 0)
+ aad_pdata->c_mic_btn_thr = (u8) fw_val32;
else
aad_pdata->c_mic_btn_thr = 0x3E;
- if (of_property_read_u32(aad_np, "dlg,btn-avg", &of_val32) >= 0)
- aad_pdata->btn_avg = da7219_aad_of_btn_avg(codec, of_val32);
+ if (fwnode_property_read_u32(aad_np, "dlg,btn-avg", &fw_val32) >= 0)
+ aad_pdata->btn_avg = da7219_aad_fw_btn_avg(codec, fw_val32);
else
aad_pdata->btn_avg = DA7219_AAD_BTN_AVG_2;
- if (of_property_read_u32(aad_np, "dlg,adc-1bit-rpt", &of_val32) >= 0)
+ if (fwnode_property_read_u32(aad_np, "dlg,adc-1bit-rpt", &fw_val32) >= 0)
aad_pdata->adc_1bit_rpt =
- da7219_aad_of_adc_1bit_rpt(codec, of_val32);
+ da7219_aad_fw_adc_1bit_rpt(codec, fw_val32);
else
aad_pdata->adc_1bit_rpt = DA7219_AAD_ADC_1BIT_RPT_1;
-out:
- of_node_put(aad_np);
-
return aad_pdata;
}
da7219->aad = da7219_aad;
da7219_aad->codec = codec;
- /* Handle any DT/platform data */
- if ((codec->dev->of_node) && (da7219->pdata))
- da7219->pdata->aad_pdata = da7219_aad_of_to_pdata(codec);
+ /* Handle any DT/ACPI/platform data */
+ if (da7219->pdata && !da7219->pdata->aad_pdata)
+ da7219->pdata->aad_pdata = da7219_aad_fw_to_pdata(codec);
da7219_aad_handle_pdata(codec);
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/pm.h>
/*
- * DT
+ * DT/ACPI
*/
static const struct of_device_id da7219_of_match[] = {
MODULE_DEVICE_TABLE(acpi, da7219_acpi_match);
static enum da7219_micbias_voltage
- da7219_of_micbias_lvl(struct snd_soc_codec *codec, u32 val)
+ da7219_fw_micbias_lvl(struct device *dev, u32 val)
{
switch (val) {
case 1600:
case 2600:
return DA7219_MICBIAS_2_6V;
default:
- dev_warn(codec->dev, "Invalid micbias level");
+ dev_warn(dev, "Invalid micbias level");
return DA7219_MICBIAS_2_2V;
}
}
static enum da7219_mic_amp_in_sel
- da7219_of_mic_amp_in_sel(struct snd_soc_codec *codec, const char *str)
+ da7219_fw_mic_amp_in_sel(struct device *dev, const char *str)
{
if (!strcmp(str, "diff")) {
return DA7219_MIC_AMP_IN_SEL_DIFF;
} else if (!strcmp(str, "se_n")) {
return DA7219_MIC_AMP_IN_SEL_SE_N;
} else {
- dev_warn(codec->dev, "Invalid mic input type selection");
+ dev_warn(dev, "Invalid mic input type selection");
return DA7219_MIC_AMP_IN_SEL_DIFF;
}
}
-static struct da7219_pdata *da7219_of_to_pdata(struct snd_soc_codec *codec)
+static struct da7219_pdata *da7219_fw_to_pdata(struct snd_soc_codec *codec)
{
- struct device_node *np = codec->dev->of_node;
+ struct device *dev = codec->dev;
struct da7219_pdata *pdata;
const char *of_str;
u32 of_val32;
- pdata = devm_kzalloc(codec->dev, sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
- if (of_property_read_u32(np, "dlg,micbias-lvl", &of_val32) >= 0)
- pdata->micbias_lvl = da7219_of_micbias_lvl(codec, of_val32);
+ if (device_property_read_u32(dev, "dlg,micbias-lvl", &of_val32) >= 0)
+ pdata->micbias_lvl = da7219_fw_micbias_lvl(dev, of_val32);
else
pdata->micbias_lvl = DA7219_MICBIAS_2_2V;
- if (!of_property_read_string(np, "dlg,mic-amp-in-sel", &of_str))
- pdata->mic_amp_in_sel = da7219_of_mic_amp_in_sel(codec, of_str);
+ if (!device_property_read_string(dev, "dlg,mic-amp-in-sel", &of_str))
+ pdata->mic_amp_in_sel = da7219_fw_mic_amp_in_sel(dev, of_str);
else
pdata->mic_amp_in_sel = DA7219_MIC_AMP_IN_SEL_DIFF;
break;
}
- /* Handle DT/Platform data */
- if (codec->dev->of_node)
- da7219->pdata = da7219_of_to_pdata(codec);
- else
- da7219->pdata = dev_get_platdata(codec->dev);
+ /* Handle DT/ACPI/Platform data */
+ da7219->pdata = dev_get_platdata(codec->dev);
+ if (!da7219->pdata)
+ da7219->pdata = da7219_fw_to_pdata(codec);
da7219_handle_pdata(codec);
}
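
The conversion above replaces the OF-only of_property_*() helpers with the unified firmware-node API from <linux/property.h>, so one probe path now serves both DT and ACPI platforms. A minimal sketch of the pattern, assuming a hypothetical property name and default value:

	static u32 example_read_prop(struct device *dev)
	{
		u32 val;

		/* Resolves from DT or ACPI _DSD, whichever described the device. */
		if (!device_property_read_u32(dev, "dlg,example-prop", &val))
			return val;

		return 42;	/* hypothetical default when the property is absent */
	}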
hdac_hdmi_parse_eld(edev, pin);
- print_hex_dump_bytes("ELD: ", DUMP_PREFIX_OFFSET,
- pin->eld.eld_buffer, pin->eld.eld_size);
+ print_hex_dump_debug("ELD: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ pin->eld.eld_buffer, pin->eld.eld_size,
+ true);
} else {
pin->eld.monitor_present = false;
pin->eld.eld_valid = false;
* exit, we call pm_runtime_suspend() so that will do for us
*/
hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+ if (!hlink) {
+ dev_err(&edev->hdac.dev, "hdac link not found\n");
+ return -EIO;
+ }
+
snd_hdac_ext_bus_link_get(edev->ebus, hlink);
ret = create_fill_widget_route_map(dapm);
/* hold the ref while we probe */
hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+ if (!hlink) {
+ dev_err(&edev->hdac.dev, "hdac link not found\n");
+ return -EIO;
+ }
+
snd_hdac_ext_bus_link_get(edev->ebus, hlink);
hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL);
}
hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+ if (!hlink) {
+ dev_err(dev, "hdac link not found\n");
+ return -EIO;
+ }
+
snd_hdac_ext_bus_link_put(ebus, hlink);
return 0;
return 0;
hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+ if (!hlink) {
+ dev_err(dev, "hdac link not found\n");
+ return -EIO;
+ }
+
snd_hdac_ext_bus_link_get(ebus, hlink);
err = snd_hdac_display_power(bus, true);
static const struct hda_device_id hdmi_list[] = {
HDA_CODEC_EXT_ENTRY(0x80862809, 0x100000, "Skylake HDMI", 0),
HDA_CODEC_EXT_ENTRY(0x8086280a, 0x100000, "Broxton HDMI", 0),
+ HDA_CODEC_EXT_ENTRY(0x8086280b, 0x100000, "Kabylake HDMI", 0),
{}
};
return ret;
if (hcp->hcd.ops->audio_startup) {
- ret = hcp->hcd.ops->audio_startup(dai->dev->parent);
+ ret = hcp->hcd.ops->audio_startup(dai->dev->parent, hcp->hcd.data);
if (ret) {
mutex_lock(&hcp->current_stream_lock);
hcp->current_stream = NULL;
}
if (hcp->hcd.ops->get_eld) {
- ret = hcp->hcd.ops->get_eld(dai->dev->parent, hcp->eld,
- sizeof(hcp->eld));
+ ret = hcp->hcd.ops->get_eld(dai->dev->parent, hcp->hcd.data,
+ hcp->eld, sizeof(hcp->eld));
if (!ret) {
ret = snd_pcm_hw_constraint_eld(substream->runtime,
WARN_ON(hcp->current_stream != substream);
- hcp->hcd.ops->audio_shutdown(dai->dev->parent);
+ hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data);
mutex_lock(&hcp->current_stream_lock);
hcp->current_stream = NULL;
hp.sample_rate = params_rate(params);
hp.channels = params_channels(params);
- return hcp->hcd.ops->hw_params(dai->dev->parent, &hcp->daifmt[dai->id],
- &hp);
+ return hcp->hcd.ops->hw_params(dai->dev->parent, hcp->hcd.data,
+ &hcp->daifmt[dai->id], &hp);
}
static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
dev_dbg(dai->dev, "%s()\n", __func__);
if (hcp->hcd.ops->digital_mute)
- return hcp->hcd.ops->digital_mute(dai->dev->parent, mute);
+ return hcp->hcd.ops->digital_mute(dai->dev->parent,
+ hcp->hcd.data, mute);
return 0;
}
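
With this rework, every hdmi_codec_ops callback also receives the opaque data pointer (hcp->hcd.data) that the video-side driver registered together with the ops. A hedged sketch of what a consumer might look like after the change; struct my_hdmi and my_hdmi_enable_audio() are hypothetical names:

	static int my_hdmi_audio_startup(struct device *dev, void *data)
	{
		struct my_hdmi *hdmi = data;	/* context registered with the codec */

		/* No need to recover the context from dev anymore. */
		return my_hdmi_enable_audio(hdmi);
	}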
--- /dev/null
+/*
+ * MAX98504 ALSA SoC Audio driver
+ *
+ * Copyright 2013 - 2014 Maxim Integrated Products
+ * Copyright 2016 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <sound/soc.h>
+
+#include "max98504.h"
+
+static const char * const max98504_supply_names[] = {
+ "DVDD",
+ "DIOVDD",
+ "PVDD",
+};
+#define MAX98504_NUM_SUPPLIES ARRAY_SIZE(max98504_supply_names)
+
+struct max98504_priv {
+ struct regmap *regmap;
+ struct regulator_bulk_data supplies[MAX98504_NUM_SUPPLIES];
+ unsigned int pcm_rx_channels;
+ bool brownout_enable;
+ unsigned int brownout_threshold;
+ unsigned int brownout_attenuation;
+ unsigned int brownout_attack_hold;
+ unsigned int brownout_timed_hold;
+ unsigned int brownout_release_rate;
+};
+
+static struct reg_default max98504_reg_defaults[] = {
+ { 0x01, 0},
+ { 0x02, 0},
+ { 0x03, 0},
+ { 0x04, 0},
+ { 0x10, 0},
+ { 0x11, 0},
+ { 0x12, 0},
+ { 0x13, 0},
+ { 0x14, 0},
+ { 0x15, 0},
+ { 0x16, 0},
+ { 0x17, 0},
+ { 0x18, 0},
+ { 0x19, 0},
+ { 0x1A, 0},
+ { 0x20, 0},
+ { 0x21, 0},
+ { 0x22, 0},
+ { 0x23, 0},
+ { 0x24, 0},
+ { 0x25, 0},
+ { 0x26, 0},
+ { 0x27, 0},
+ { 0x28, 0},
+ { 0x30, 0},
+ { 0x31, 0},
+ { 0x32, 0},
+ { 0x33, 0},
+ { 0x34, 0},
+ { 0x35, 0},
+ { 0x36, 0},
+ { 0x37, 0},
+ { 0x38, 0},
+ { 0x39, 0},
+ { 0x40, 0},
+ { 0x41, 0},
+};
+
+static bool max98504_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98504_INTERRUPT_STATUS:
+ case MAX98504_INTERRUPT_FLAGS:
+ case MAX98504_INTERRUPT_FLAG_CLEARS:
+ case MAX98504_WATCHDOG_CLEAR:
+ case MAX98504_GLOBAL_ENABLE:
+ case MAX98504_SOFTWARE_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max98504_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98504_SOFTWARE_RESET:
+ case MAX98504_WATCHDOG_CLEAR:
+ case MAX98504_INTERRUPT_FLAG_CLEARS:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int max98504_pcm_rx_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
+ struct max98504_priv *max98504 = snd_soc_component_get_drvdata(c);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ regmap_write(max98504->regmap, MAX98504_PCM_RX_ENABLE,
+ max98504->pcm_rx_channels);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ regmap_write(max98504->regmap, MAX98504_PCM_RX_ENABLE, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static int max98504_component_probe(struct snd_soc_component *c)
+{
+ struct max98504_priv *max98504 = snd_soc_component_get_drvdata(c);
+ struct regmap *map = max98504->regmap;
+ int ret;
+
+ ret = regulator_bulk_enable(MAX98504_NUM_SUPPLIES, max98504->supplies);
+ if (ret < 0)
+ return ret;
+
+ regmap_write(map, MAX98504_SOFTWARE_RESET, 0x1);
+ msleep(20);
+
+ if (!max98504->brownout_enable)
+ return 0;
+
+ regmap_write(map, MAX98504_PVDD_BROWNOUT_ENABLE, 0x1);
+
+ regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_1,
+ (max98504->brownout_threshold & 0x1f) << 3 |
+ (max98504->brownout_attenuation & 0x3));
+
+ regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_2,
+ max98504->brownout_attack_hold & 0xff);
+
+ regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_3,
+ max98504->brownout_timed_hold & 0xff);
+
+ regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_4,
+ max98504->brownout_release_rate & 0xff);
+
+ return 0;
+}
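
For reference, the CONFIG_1 write above packs the 5-bit brownout threshold into bits 7:3 and the 2-bit attenuation into bits 1:0. A worked example with made-up values:

	/* Hypothetical values: threshold = 0x12, attenuation = 2 */
	u8 cfg1 = (0x12 & 0x1f) << 3 | (0x2 & 0x3);	/* == 0x92 */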
+
+static void max98504_component_remove(struct snd_soc_component *c)
+{
+ struct max98504_priv *max98504 = snd_soc_component_get_drvdata(c);
+
+ regulator_bulk_disable(MAX98504_NUM_SUPPLIES, max98504->supplies);
+}
+
+static const char *spk_source_mux_text[] = {
+ "PCM Monomix", "Analog In", "PDM Left", "PDM Right"
+};
+
+static const struct soc_enum spk_source_mux_enum =
+ SOC_ENUM_SINGLE(MAX98504_SPEAKER_SOURCE_SELECT,
+ 0, ARRAY_SIZE(spk_source_mux_text),
+ spk_source_mux_text);
+
+static const struct snd_kcontrol_new spk_source_mux =
+ SOC_DAPM_ENUM("SPK Source", spk_source_mux_enum);
+
+static const struct snd_soc_dapm_route max98504_dapm_routes[] = {
+ { "SPKOUT", NULL, "Global Enable" },
+ { "SPK Source", "PCM Monomix", "DAC PCM" },
+ { "SPK Source", "Analog In", "AIN" },
+ { "SPK Source", "PDM Left", "DAC PDM" },
+ { "SPK Source", "PDM Right", "DAC PDM" },
+};
+
+static const struct snd_soc_dapm_widget max98504_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("Global Enable", MAX98504_GLOBAL_ENABLE,
+ 0, 0, NULL, 0),
+ SND_SOC_DAPM_INPUT("AIN"),
+ SND_SOC_DAPM_AIF_OUT("AIF2OUTL", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF2OUTR", "AIF2 Capture", 1, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC_E("DAC PCM", NULL, SND_SOC_NOPM, 0, 0,
+ max98504_pcm_rx_ev,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_DAC("DAC PDM", NULL, MAX98504_PDM_RX_ENABLE, 0, 0),
+ SND_SOC_DAPM_MUX("SPK Source", SND_SOC_NOPM, 0, 0, &spk_source_mux),
+ SND_SOC_DAPM_REG(snd_soc_dapm_spk, "SPKOUT",
+ MAX98504_SPEAKER_ENABLE, 0, 1, 1, 0),
+};
+
+static int max98504_set_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct max98504_priv *max98504 = snd_soc_dai_get_drvdata(dai);
+ struct regmap *map = max98504->regmap;
+
+ switch (dai->id) {
+ case MAX98504_DAI_ID_PCM:
+ regmap_write(map, MAX98504_PCM_TX_ENABLE, tx_mask);
+ max98504->pcm_rx_channels = rx_mask;
+ break;
+
+ case MAX98504_DAI_ID_PDM:
+ regmap_write(map, MAX98504_PDM_TX_ENABLE, tx_mask);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return 0;
+}
+
+static int max98504_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+{
+ struct max98504_priv *max98504 = snd_soc_dai_get_drvdata(dai);
+ struct regmap *map = max98504->regmap;
+ unsigned int i, sources = 0;
+
+ for (i = 0; i < tx_num; i++)
+ if (tx_slot[i])
+ sources |= (1 << i);
+
+ switch (dai->id) {
+ case MAX98504_DAI_ID_PCM:
+ regmap_write(map, MAX98504_PCM_TX_CHANNEL_SOURCES,
+ sources);
+ break;
+
+ case MAX98504_DAI_ID_PDM:
+ regmap_write(map, MAX98504_PDM_TX_CONTROL, sources);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ regmap_write(map, MAX98504_MEASUREMENT_ENABLE, sources ? 0x3 : 0x01);
+
+ return 0;
+}
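
The loop above folds the per-slot enable array into a one-bit-per-slot source mask. A quick sketch with hypothetical slot values:

	unsigned int tx_slot[] = { 1, 0, 1, 0 };	/* enable slots 0 and 2 */
	unsigned int i, sources = 0;

	for (i = 0; i < 4; i++)
		if (tx_slot[i])
			sources |= (1 << i);	/* sources == 0x5 */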
+
+static const struct snd_soc_dai_ops max98504_dai_ops = {
+ .set_tdm_slot = max98504_set_tdm_slot,
+ .set_channel_map = max98504_set_channel_map,
+};
+
+#define MAX98504_FORMATS (SNDRV_PCM_FMTBIT_S8|SNDRV_PCM_FMTBIT_S16_LE|\
+ SNDRV_PCM_FMTBIT_S24_LE|SNDRV_PCM_FMTBIT_S32_LE)
+#define MAX98504_PDM_RATES (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|\
+ SNDRV_PCM_RATE_32000|SNDRV_PCM_RATE_44100|\
+ SNDRV_PCM_RATE_48000|SNDRV_PCM_RATE_88200|\
+ SNDRV_PCM_RATE_96000)
+
+static struct snd_soc_dai_driver max98504_dai[] = {
+ /* TODO: Add the PCM interface definitions */
+ {
+ .name = "max98504-aif2",
+ .id = MAX98504_DAI_ID_PDM,
+ .playback = {
+ .stream_name = "AIF2 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98504_PDM_RATES,
+ .formats = MAX98504_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98504_PDM_RATES,
+ .formats = MAX98504_FORMATS,
+ },
+ .ops = &max98504_dai_ops,
+ },
+};
+
+static const struct snd_soc_component_driver max98504_component_driver = {
+ .probe = max98504_component_probe,
+ .remove = max98504_component_remove,
+ .dapm_widgets = max98504_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98504_dapm_widgets),
+ .dapm_routes = max98504_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(max98504_dapm_routes),
+};
+
+static const struct regmap_config max98504_regmap = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = MAX98504_MAX_REGISTER,
+ .reg_defaults = max98504_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(max98504_reg_defaults),
+ .volatile_reg = max98504_volatile_register,
+ .readable_reg = max98504_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int max98504_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *node = dev->of_node;
+ struct max98504_priv *max98504;
+ int i, ret;
+
+ max98504 = devm_kzalloc(dev, sizeof(*max98504), GFP_KERNEL);
+ if (!max98504)
+ return -ENOMEM;
+
+ if (node) {
+ if (!of_property_read_u32(node, "maxim,brownout-threshold",
+ &max98504->brownout_threshold))
+ max98504->brownout_enable = true;
+
+ of_property_read_u32(node, "maxim,brownout-attenuation",
+ &max98504->brownout_attenuation);
+ of_property_read_u32(node, "maxim,brownout-attack-hold-ms",
+ &max98504->brownout_attack_hold);
+ of_property_read_u32(node, "maxim,brownout-timed-hold-ms",
+ &max98504->brownout_timed_hold);
+ of_property_read_u32(node, "maxim,brownout-release-rate-ms",
+ &max98504->brownout_release_rate);
+ }
+
+ max98504->regmap = devm_regmap_init_i2c(client, &max98504_regmap);
+ if (IS_ERR(max98504->regmap)) {
+ ret = PTR_ERR(max98504->regmap);
+ dev_err(&client->dev, "regmap initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < MAX98504_NUM_SUPPLIES; i++)
+ max98504->supplies[i].supply = max98504_supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, MAX98504_NUM_SUPPLIES,
+ max98504->supplies);
+ if (ret < 0)
+ return ret;
+
+ i2c_set_clientdata(client, max98504);
+
+ return devm_snd_soc_register_component(dev, &max98504_component_driver,
+ max98504_dai, ARRAY_SIZE(max98504_dai));
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id max98504_of_match[] = {
+ { .compatible = "maxim,max98504" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, max98504_of_match);
+#endif
+
+static const struct i2c_device_id max98504_i2c_id[] = {
+ { "max98504" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max98504_i2c_id);
+
+static struct i2c_driver max98504_i2c_driver = {
+ .driver = {
+ .name = "max98504",
+ .of_match_table = of_match_ptr(max98504_of_match),
+ },
+ .probe = max98504_i2c_probe,
+ .id_table = max98504_i2c_id,
+};
+module_i2c_driver(max98504_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC MAX98504 driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * MAX98504 ALSA SoC Audio driver
+ *
+ * Copyright 2011 - 2012 Maxim Integrated Products
+ * Copyright 2016 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef MAX98504_H_
+#define MAX98504_H_
+
+/*
+ * MAX98504 Register Definitions
+ */
+#define MAX98504_INTERRUPT_STATUS 0x01
+#define MAX98504_INTERRUPT_FLAGS 0x02
+#define MAX98504_INTERRUPT_ENABLE 0x03
+#define MAX98504_INTERRUPT_FLAG_CLEARS 0x04
+#define MAX98504_GPIO_ENABLE 0x10
+#define MAX98504_GPIO_CONFIG 0x11
+#define MAX98504_WATCHDOG_ENABLE 0x12
+#define MAX98504_WATCHDOG_CONFIG 0x13
+#define MAX98504_WATCHDOG_CLEAR 0x14
+#define MAX98504_CLOCK_MONITOR_ENABLE 0x15
+#define MAX98504_PVDD_BROWNOUT_ENABLE 0x16
+#define MAX98504_PVDD_BROWNOUT_CONFIG_1 0x17
+#define MAX98504_PVDD_BROWNOUT_CONFIG_2 0x18
+#define MAX98504_PVDD_BROWNOUT_CONFIG_3 0x19
+#define MAX98504_PVDD_BROWNOUT_CONFIG_4 0x1a
+#define MAX98504_PCM_RX_ENABLE 0x20
+#define MAX98504_PCM_TX_ENABLE 0x21
+#define MAX98504_PCM_TX_HIZ_CONTROL 0x22
+#define MAX98504_PCM_TX_CHANNEL_SOURCES 0x23
+#define MAX98504_PCM_MODE_CONFIG 0x24
+#define MAX98504_PCM_DSP_CONFIG 0x25
+#define MAX98504_PCM_CLOCK_SETUP 0x26
+#define MAX98504_PCM_SAMPLE_RATE_SETUP 0x27
+#define MAX98504_PCM_TO_SPEAKER_MONOMIX 0x28
+#define MAX98504_PDM_TX_ENABLE 0x30
+#define MAX98504_PDM_TX_HIZ_CONTROL 0x31
+#define MAX98504_PDM_TX_CONTROL 0x32
+#define MAX98504_PDM_RX_ENABLE 0x33
+#define MAX98504_SPEAKER_ENABLE 0x34
+#define MAX98504_SPEAKER_SOURCE_SELECT 0x35
+#define MAX98504_MEASUREMENT_ENABLE 0x36
+#define MAX98504_ANALOGUE_INPUT_GAIN 0x37
+#define MAX98504_TEMPERATURE_LIMIT_CONFIG 0x38
+#define MAX98504_GLOBAL_ENABLE 0x40
+#define MAX98504_SOFTWARE_RESET 0x41
+#define MAX98504_REV_ID 0x7fff
+
+#define MAX98504_MAX_REGISTER 0x7fff
+
+#define MAX98504_DAI_ID_PCM 1
+#define MAX98504_DAI_ID_PDM 2
+
+#endif /* MAX98504_H_ */
--- /dev/null
+/*
+ * Driver for the MAX9860 Mono Audio Voice Codec
+ *
+ * https://datasheets.maximintegrated.com/en/ds/MAX9860.pdf
+ *
+ * The driver does not support sidetone since the DVST register field is
+ * backwards, with mute near the maximum level instead of the minimum.
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ * Copyright 2016 Axentia Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+
+#include "max9860.h"
+
+struct max9860_priv {
+ struct regmap *regmap;
+ struct regulator *dvddio;
+ struct notifier_block dvddio_nb;
+ u8 psclk;
+ unsigned long pclk_rate;
+ int fmt;
+};
+
+static int max9860_dvddio_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct max9860_priv *max9860 = container_of(nb, struct max9860_priv,
+ dvddio_nb);
+ if (event & REGULATOR_EVENT_DISABLE) {
+ regcache_mark_dirty(max9860->regmap);
+ regcache_cache_only(max9860->regmap, true);
+ }
+
+ return 0;
+}
+
+static const struct reg_default max9860_reg_defaults[] = {
+ { MAX9860_PWRMAN, 0x00 },
+ { MAX9860_INTEN, 0x00 },
+ { MAX9860_SYSCLK, 0x00 },
+ { MAX9860_AUDIOCLKHIGH, 0x00 },
+ { MAX9860_AUDIOCLKLOW, 0x00 },
+ { MAX9860_IFC1A, 0x00 },
+ { MAX9860_IFC1B, 0x00 },
+ { MAX9860_VOICEFLTR, 0x00 },
+ { MAX9860_DACATTN, 0x00 },
+ { MAX9860_ADCLEVEL, 0x00 },
+ { MAX9860_DACGAIN, 0x00 },
+ { MAX9860_MICGAIN, 0x00 },
+ { MAX9860_MICADC, 0x00 },
+ { MAX9860_NOISEGATE, 0x00 },
+};
+
+static bool max9860_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX9860_INTRSTATUS ... MAX9860_MICGAIN:
+ case MAX9860_MICADC ... MAX9860_PWRMAN:
+ case MAX9860_REVISION:
+ return true;
+ }
+
+ return false;
+}
+
+static bool max9860_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX9860_INTEN ... MAX9860_MICGAIN:
+ case MAX9860_MICADC ... MAX9860_PWRMAN:
+ return true;
+ }
+
+ return false;
+}
+
+static bool max9860_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX9860_INTRSTATUS:
+ case MAX9860_MICREADBACK:
+ return true;
+ }
+
+ return false;
+}
+
+static bool max9860_precious(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX9860_INTRSTATUS:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config max9860_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .readable_reg = max9860_readable,
+ .writeable_reg = max9860_writeable,
+ .volatile_reg = max9860_volatile,
+ .precious_reg = max9860_precious,
+
+ .max_register = MAX9860_MAX_REGISTER,
+ .reg_defaults = max9860_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(max9860_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const DECLARE_TLV_DB_SCALE(dva_tlv, -9100, 100, 1);
+static const DECLARE_TLV_DB_SCALE(dvg_tlv, 0, 600, 0);
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -1200, 100, 0);
+static const DECLARE_TLV_DB_RANGE(pam_tlv,
+ 0, MAX9860_PAM_MAX - 1, TLV_DB_SCALE_ITEM(-2000, 2000, 1),
+ MAX9860_PAM_MAX, MAX9860_PAM_MAX, TLV_DB_SCALE_ITEM(3000, 0, 0));
+static const DECLARE_TLV_DB_SCALE(pgam_tlv, 0, 100, 0);
+static const DECLARE_TLV_DB_SCALE(anth_tlv, -7600, 400, 1);
+static const DECLARE_TLV_DB_SCALE(agcth_tlv, -1800, 100, 0);
+
+static const char * const agchld_text[] = {
+ "AGC Disabled", "50ms", "100ms", "400ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(agchld_enum, MAX9860_MICADC,
+ MAX9860_AGCHLD_SHIFT, agchld_text);
+
+static const char * const agcsrc_text[] = {
+ "Left ADC", "Left/Right ADC"
+};
+
+static SOC_ENUM_SINGLE_DECL(agcsrc_enum, MAX9860_MICADC,
+ MAX9860_AGCSRC_SHIFT, agcsrc_text);
+
+static const char * const agcatk_text[] = {
+ "3ms", "12ms", "50ms", "200ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(agcatk_enum, MAX9860_MICADC,
+ MAX9860_AGCATK_SHIFT, agcatk_text);
+
+static const char * const agcrls_text[] = {
+ "78ms", "156ms", "312ms", "625ms",
+ "1.25s", "2.5s", "5s", "10s"
+};
+
+static SOC_ENUM_SINGLE_DECL(agcrls_enum, MAX9860_MICADC,
+ MAX9860_AGCRLS_SHIFT, agcrls_text);
+
+static const char * const filter_text[] = {
+ "Disabled",
+ "Elliptical HP 217Hz notch (16kHz)",
+ "Butterworth HP 500Hz (16kHz)",
+ "Elliptical HP 217Hz notch (8kHz)",
+ "Butterworth HP 500Hz (8kHz)",
+ "Butterworth HP 200Hz (48kHz)"
+};
+
+static SOC_ENUM_SINGLE_DECL(avflt_enum, MAX9860_VOICEFLTR,
+ MAX9860_AVFLT_SHIFT, filter_text);
+
+static SOC_ENUM_SINGLE_DECL(dvflt_enum, MAX9860_VOICEFLTR,
+ MAX9860_DVFLT_SHIFT, filter_text);
+
+static const struct snd_kcontrol_new max9860_controls[] = {
+SOC_SINGLE_TLV("Master Playback Volume", MAX9860_DACATTN,
+ MAX9860_DVA_SHIFT, MAX9860_DVA_MUTE, 1, dva_tlv),
+SOC_SINGLE_TLV("DAC Gain Volume", MAX9860_DACGAIN,
+ MAX9860_DVG_SHIFT, MAX9860_DVG_MAX, 0, dvg_tlv),
+SOC_DOUBLE_TLV("Line Capture Volume", MAX9860_ADCLEVEL,
+ MAX9860_ADCLL_SHIFT, MAX9860_ADCRL_SHIFT, MAX9860_ADCxL_MIN, 1,
+ adc_tlv),
+
+SOC_ENUM("AGC Hold Time", agchld_enum),
+SOC_ENUM("AGC/Noise Gate Source", agcsrc_enum),
+SOC_ENUM("AGC Attack Time", agcatk_enum),
+SOC_ENUM("AGC Release Time", agcrls_enum),
+
+SOC_SINGLE_TLV("Noise Gate Threshold Volume", MAX9860_NOISEGATE,
+ MAX9860_ANTH_SHIFT, MAX9860_ANTH_MAX, 0, anth_tlv),
+SOC_SINGLE_TLV("AGC Signal Threshold Volume", MAX9860_NOISEGATE,
+ MAX9860_AGCTH_SHIFT, MAX9860_AGCTH_MIN, 1, agcth_tlv),
+
+SOC_SINGLE_TLV("Mic PGA Volume", MAX9860_MICGAIN,
+ MAX9860_PGAM_SHIFT, MAX9860_PGAM_MIN, 1, pgam_tlv),
+SOC_SINGLE_TLV("Mic Preamp Volume", MAX9860_MICGAIN,
+ MAX9860_PAM_SHIFT, MAX9860_PAM_MAX, 0, pam_tlv),
+
+SOC_ENUM("ADC Filter", avflt_enum),
+SOC_ENUM("DAC Filter", dvflt_enum),
+};
+
+static const struct snd_soc_dapm_widget max9860_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("MICL"),
+SND_SOC_DAPM_INPUT("MICR"),
+
+SND_SOC_DAPM_ADC("ADCL", NULL, MAX9860_PWRMAN, MAX9860_ADCLEN_SHIFT, 0),
+SND_SOC_DAPM_ADC("ADCR", NULL, MAX9860_PWRMAN, MAX9860_ADCREN_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_OUT("AIFOUTL", "Capture", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_OUT("AIFOUTR", "Capture", 1, SND_SOC_NOPM, 0, 0),
+
+SND_SOC_DAPM_AIF_IN("AIFINL", "Playback", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_IN("AIFINR", "Playback", 1, SND_SOC_NOPM, 0, 0),
+
+SND_SOC_DAPM_DAC("DAC", NULL, MAX9860_PWRMAN, MAX9860_DACEN_SHIFT, 0),
+
+SND_SOC_DAPM_OUTPUT("OUT"),
+
+SND_SOC_DAPM_SUPPLY("Supply", SND_SOC_NOPM, 0, 0,
+ NULL, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_REGULATOR_SUPPLY("AVDD", 0, 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("DVDD", 0, 0),
+SND_SOC_DAPM_CLOCK_SUPPLY("mclk"),
+};
+
+static const struct snd_soc_dapm_route max9860_dapm_routes[] = {
+ { "ADCL", NULL, "MICL" },
+ { "ADCR", NULL, "MICR" },
+ { "AIFOUTL", NULL, "ADCL" },
+ { "AIFOUTR", NULL, "ADCR" },
+
+ { "DAC", NULL, "AIFINL" },
+ { "DAC", NULL, "AIFINR" },
+ { "OUT", NULL, "DAC" },
+
+ { "Supply", NULL, "AVDD" },
+ { "Supply", NULL, "DVDD" },
+ { "Supply", NULL, "mclk" },
+
+ { "DAC", NULL, "Supply" },
+ { "ADCL", NULL, "Supply" },
+ { "ADCR", NULL, "Supply" },
+};
+
+static int max9860_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max9860_priv *max9860 = snd_soc_codec_get_drvdata(codec);
+ u8 master;
+ u8 ifc1a = 0;
+ u8 ifc1b = 0;
+ u8 sysclk = 0;
+ unsigned long n;
+ int ret;
+
+ dev_dbg(codec->dev, "hw_params %u Hz, %u channels\n",
+ params_rate(params),
+ params_channels(params));
+
+ if (params_channels(params) == 2)
+ ifc1b |= MAX9860_ST;
+
+ switch (max9860->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ master = 0;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ master = MAX9860_MASTER;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ifc1a |= master;
+
+ if (master) {
+ if (params_width(params) * params_channels(params) > 48)
+ ifc1b |= MAX9860_BSEL_64X;
+ else
+ ifc1b |= MAX9860_BSEL_48X;
+ }
+
+ switch (max9860->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ ifc1a |= MAX9860_DDLY;
+ ifc1b |= MAX9860_ADLY;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ ifc1a |= MAX9860_WCI;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ if (params_width(params) != 16) {
+ dev_err(codec->dev,
+ "DSP_A works for 16 bits per sample only.\n");
+ return -EINVAL;
+ }
+ ifc1a |= MAX9860_DDLY | MAX9860_WCI | MAX9860_HIZ | MAX9860_TDM;
+ ifc1b |= MAX9860_ADLY;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ if (params_width(params) != 16) {
+ dev_err(codec->dev,
+ "DSP_B works for 16 bits per sample only.\n");
+ return -EINVAL;
+ }
+ ifc1a |= MAX9860_WCI | MAX9860_HIZ | MAX9860_TDM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (max9860->fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ switch (max9860->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ return -EINVAL;
+ }
+ ifc1a ^= MAX9860_WCI;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ switch (max9860->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ return -EINVAL;
+ }
+ ifc1a ^= MAX9860_WCI;
+ /* fall through */
+ case SND_SOC_DAIFMT_IB_NF:
+ ifc1a ^= MAX9860_DBCI;
+ ifc1b ^= MAX9860_ABCI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(codec->dev, "IFC1A %02x\n", ifc1a);
+ ret = regmap_write(max9860->regmap, MAX9860_IFC1A, ifc1a);
+ if (ret) {
+ dev_err(codec->dev, "Failed to set IFC1A: %d\n", ret);
+ return ret;
+ }
+ dev_dbg(codec->dev, "IFC1B %02x\n", ifc1b);
+ ret = regmap_write(max9860->regmap, MAX9860_IFC1B, ifc1b);
+ if (ret) {
+ dev_err(codec->dev, "Failed to set IFC1B: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Check if Integer Clock Mode is possible, but avoid it in slave mode
+ * since we then do not know if lrclk is derived from pclk and the
+ * datasheet mentions that the frequencies have to match exactly in
+ * order for this to work.
+ */
+ if (params_rate(params) == 8000 || params_rate(params) == 16000) {
+ if (master) {
+ switch (max9860->pclk_rate) {
+ case 12000000:
+ sysclk = MAX9860_FREQ_12MHZ;
+ break;
+ case 13000000:
+ sysclk = MAX9860_FREQ_13MHZ;
+ break;
+ case 19200000:
+ sysclk = MAX9860_FREQ_19_2MHZ;
+ break;
+ default:
+ /*
+ * Integer Clock Mode not possible. Leave
+ * sysclk at zero and fall through to the
+ * code below for PLL mode.
+ */
+ break;
+ }
+
+ if (sysclk && params_rate(params) == 16000)
+ sysclk |= MAX9860_16KHZ;
+ }
+ }
+
+ /*
+ * Largest possible n:
+ * 65536 * 96 * 48kHz / 10MHz -> 30199
+ * Smallest possible n:
+ * 65536 * 96 * 8kHz / 20MHz -> 2517
+ * Both fit nicely in the available 15 bits, no need to apply any mask.
+ */
+ n = DIV_ROUND_CLOSEST_ULL(65536ULL * 96 * params_rate(params),
+ max9860->pclk_rate);
+
+ if (!sysclk) {
+ /* PLL mode */
+ if (params_rate(params) > 24000)
+ sysclk |= MAX9860_16KHZ;
+
+ if (!master)
+ n |= 1; /* trigger rapid pll lock mode */
+ }
+
+ sysclk |= max9860->psclk;
+ dev_dbg(codec->dev, "SYSCLK %02x\n", sysclk);
+ ret = regmap_write(max9860->regmap,
+ MAX9860_SYSCLK, sysclk);
+ if (ret) {
+ dev_err(codec->dev, "Failed to set SYSCLK: %d\n", ret);
+ return ret;
+ }
+ dev_dbg(codec->dev, "N %lu\n", n);
+ ret = regmap_write(max9860->regmap,
+ MAX9860_AUDIOCLKHIGH, n >> 8);
+ if (ret) {
+ dev_err(codec->dev, "Failed to set NHI: %d\n", ret);
+ return ret;
+ }
+ ret = regmap_write(max9860->regmap,
+ MAX9860_AUDIOCLKLOW, n & 0xff);
+ if (ret) {
+ dev_err(codec->dev, "Failed to set NLO: %d\n", ret);
+ return ret;
+ }
+
+ if (!master) {
+ dev_dbg(codec->dev, "Enable PLL\n");
+ ret = regmap_update_bits(max9860->regmap, MAX9860_AUDIOCLKHIGH,
+ MAX9860_PLL, MAX9860_PLL);
+ if (ret) {
+ dev_err(codec->dev, "Failed to enable PLL: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
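
The bounds quoted in the comment can be checked directly, since N is 65536 * 96 * fs / pclk rounded to the nearest integer. A sketch, not driver code:

	n = DIV_ROUND_CLOSEST_ULL(65536ULL * 96 * 48000, 10000000); /* -> 30199 */
	n = DIV_ROUND_CLOSEST_ULL(65536ULL * 96 * 8000, 20000000);  /* ->  2517 */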
+
+static int max9860_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max9860_priv *max9860 = snd_soc_codec_get_drvdata(codec);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_CBS_CFS:
+ max9860->fmt = fmt;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct snd_soc_dai_ops max9860_dai_ops = {
+ .hw_params = max9860_hw_params,
+ .set_fmt = max9860_set_fmt,
+};
+
+static struct snd_soc_dai_driver max9860_dai = {
+ .name = "max9860-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .ops = &max9860_dai_ops,
+ .symmetric_rates = 1,
+};
+
+static int max9860_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct max9860_priv *max9860 = dev_get_drvdata(codec->dev);
+ int ret;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ ret = regmap_update_bits(max9860->regmap, MAX9860_PWRMAN,
+ MAX9860_SHDN, MAX9860_SHDN);
+ if (ret) {
+ dev_err(codec->dev, "Failed to remove SHDN: %d\n", ret);
+ return ret;
+ }
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ ret = regmap_update_bits(max9860->regmap, MAX9860_PWRMAN,
+ MAX9860_SHDN, 0);
+ if (ret) {
+ dev_err(codec->dev, "Failed to request SHDN: %d\n",
+ ret);
+ return ret;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver max9860_codec_driver = {
+ .set_bias_level = max9860_set_bias_level,
+ .idle_bias_off = true,
+
+ .controls = max9860_controls,
+ .num_controls = ARRAY_SIZE(max9860_controls),
+ .dapm_widgets = max9860_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max9860_dapm_widgets),
+ .dapm_routes = max9860_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(max9860_dapm_routes),
+};
+
+#ifdef CONFIG_PM
+static int max9860_suspend(struct device *dev)
+{
+ struct max9860_priv *max9860 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_update_bits(max9860->regmap, MAX9860_SYSCLK,
+ MAX9860_PSCLK, MAX9860_PSCLK_OFF);
+ if (ret) {
+ dev_err(dev, "Failed to disable clock: %d\n", ret);
+ return ret;
+ }
+
+ regulator_disable(max9860->dvddio);
+
+ return 0;
+}
+
+static int max9860_resume(struct device *dev)
+{
+ struct max9860_priv *max9860 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_enable(max9860->dvddio);
+ if (ret) {
+ dev_err(dev, "Failed to enable DVDDIO: %d\n", ret);
+ return ret;
+ }
+
+ regcache_cache_only(max9860->regmap, false);
+ ret = regcache_sync(max9860->regmap);
+ if (ret) {
+ dev_err(dev, "Failed to sync cache: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(max9860->regmap, MAX9860_SYSCLK,
+ MAX9860_PSCLK, max9860->psclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops max9860_pm_ops = {
+ SET_RUNTIME_PM_OPS(max9860_suspend, max9860_resume, NULL)
+};
+
+static int max9860_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c->dev;
+ struct max9860_priv *max9860;
+ int ret;
+ struct clk *mclk;
+ unsigned long mclk_rate;
+ int i;
+ int intr;
+
+ max9860 = devm_kzalloc(dev, sizeof(struct max9860_priv), GFP_KERNEL);
+ if (!max9860)
+ return -ENOMEM;
+
+ max9860->dvddio = devm_regulator_get(dev, "DVDDIO");
+ if (IS_ERR(max9860->dvddio)) {
+ ret = PTR_ERR(max9860->dvddio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get DVDDIO supply: %d\n", ret);
+ return ret;
+ }
+
+ max9860->dvddio_nb.notifier_call = max9860_dvddio_event;
+
+ ret = regulator_register_notifier(max9860->dvddio, &max9860->dvddio_nb);
+ if (ret)
+ dev_err(dev, "Failed to register DVDDIO notifier: %d\n", ret);
+
+ ret = regulator_enable(max9860->dvddio);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable DVDDIO: %d\n", ret);
+ return ret;
+ }
+
+ max9860->regmap = devm_regmap_init_i2c(i2c, &max9860_regmap);
+ if (IS_ERR(max9860->regmap)) {
+ ret = PTR_ERR(max9860->regmap);
+ goto err_regulator;
+ }
+
+ dev_set_drvdata(dev, max9860);
+
+ /*
+ * mclk has to be in the 10MHz to 60MHz range.
+ * psclk is used to scale mclk into pclk so that
+ * pclk is in the 10MHz to 20MHz range.
+ */
+ mclk = clk_get(dev, "mclk");
+
+ if (IS_ERR(mclk)) {
+ ret = PTR_ERR(mclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get MCLK: %d\n", ret);
+ goto err_regulator;
+ }
+
+ mclk_rate = clk_get_rate(mclk);
+ clk_put(mclk);
+
+ if (mclk_rate > 60000000 || mclk_rate < 10000000) {
+ dev_err(dev, "Bad mclk %luHz (needs 10MHz - 60MHz)\n",
+ mclk_rate);
+ ret = -EINVAL;
+ goto err_regulator;
+ }
+ if (mclk_rate >= 40000000)
+ max9860->psclk = 3;
+ else if (mclk_rate >= 20000000)
+ max9860->psclk = 2;
+ else
+ max9860->psclk = 1;
+ max9860->pclk_rate = mclk_rate >> (max9860->psclk - 1);
+ max9860->psclk <<= MAX9860_PSCLK_SHIFT;
+ dev_dbg(dev, "mclk %lu pclk %lu\n", mclk_rate, max9860->pclk_rate);
+
+ regcache_cache_bypass(max9860->regmap, true);
+ for (i = 0; i < max9860_regmap.num_reg_defaults; ++i) {
+ ret = regmap_write(max9860->regmap,
+ max9860_regmap.reg_defaults[i].reg,
+ max9860_regmap.reg_defaults[i].def);
+ if (ret) {
+ dev_err(dev, "Failed to initialize register %u: %d\n",
+ max9860_regmap.reg_defaults[i].reg, ret);
+ goto err_regulator;
+ }
+ }
+ regcache_cache_bypass(max9860->regmap, false);
+
+ ret = regmap_read(max9860->regmap, MAX9860_INTRSTATUS, &intr);
+ if (ret) {
+ dev_err(dev, "Failed to clear INTRSTATUS: %d\n", ret);
+ goto err_regulator;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+
+ ret = snd_soc_register_codec(dev, &max9860_codec_driver,
+ &max9860_dai, 1);
+ if (ret) {
+ dev_err(dev, "Failed to register CODEC: %d\n", ret);
+ goto err_pm;
+ }
+
+ return 0;
+
+err_pm:
+ pm_runtime_disable(dev);
+err_regulator:
+ regulator_disable(max9860->dvddio);
+ return ret;
+}
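
The prescaler selection in the probe above divides mclk into the required 10MHz to 20MHz pclk window. A standalone sketch, assuming a made-up 25MHz mclk, which falls in the 20MHz to 40MHz band:

	unsigned long mclk_rate = 25000000;
	u8 psclk;

	if (mclk_rate >= 40000000)
		psclk = 3;		/* pclk = mclk / 4 */
	else if (mclk_rate >= 20000000)
		psclk = 2;		/* pclk = mclk / 2, 12.5 MHz here */
	else
		psclk = 1;		/* pclk = mclk */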
+
+static int max9860_remove(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct max9860_priv *max9860 = dev_get_drvdata(dev);
+
+ snd_soc_unregister_codec(dev);
+ pm_runtime_disable(dev);
+ regulator_disable(max9860->dvddio);
+ return 0;
+}
+
+static const struct i2c_device_id max9860_i2c_id[] = {
+ { "max9860", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max9860_i2c_id);
+
+static const struct of_device_id max9860_of_match[] = {
+ { .compatible = "maxim,max9860", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max9860_of_match);
+
+static struct i2c_driver max9860_i2c_driver = {
+ .probe = max9860_probe,
+ .remove = max9860_remove,
+ .id_table = max9860_i2c_id,
+ .driver = {
+ .name = "max9860",
+ .of_match_table = max9860_of_match,
+ .pm = &max9860_pm_ops,
+ },
+};
+
+module_i2c_driver(max9860_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC MAX9860 Mono Audio Voice Codec driver");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/*
+ * Driver for the MAX9860 Mono Audio Voice Codec
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ * Copyright 2016 Axentia Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _SND_SOC_MAX9860
+#define _SND_SOC_MAX9860
+
+#define MAX9860_INTRSTATUS 0x00
+#define MAX9860_MICREADBACK 0x01
+#define MAX9860_INTEN 0x02
+#define MAX9860_SYSCLK 0x03
+#define MAX9860_AUDIOCLKHIGH 0x04
+#define MAX9860_AUDIOCLKLOW 0x05
+#define MAX9860_IFC1A 0x06
+#define MAX9860_IFC1B 0x07
+#define MAX9860_VOICEFLTR 0x08
+#define MAX9860_DACATTN 0x09
+#define MAX9860_ADCLEVEL 0x0a
+#define MAX9860_DACGAIN 0x0b
+#define MAX9860_MICGAIN 0x0c
+#define MAX9860_RESERVED 0x0d
+#define MAX9860_MICADC 0x0e
+#define MAX9860_NOISEGATE 0x0f
+#define MAX9860_PWRMAN 0x10
+#define MAX9860_REVISION 0xff
+
+#define MAX9860_MAX_REGISTER 0xff
+
+/* INTRSTATUS */
+#define MAX9860_CLD 0x80
+#define MAX9860_SLD 0x40
+#define MAX9860_ULK 0x20
+
+/* MICREADBACK */
+#define MAX9860_NG 0xe0
+#define MAX9860_AGC 0x1f
+
+/* INTEN */
+#define MAX9860_ICLD 0x80
+#define MAX9860_ISLD 0x40
+#define MAX9860_IULK 0x20
+
+/* SYSCLK */
+#define MAX9860_PSCLK 0x30
+#define MAX9860_PSCLK_OFF 0x00
+#define MAX9860_PSCLK_SHIFT 4
+#define MAX9860_FREQ 0x06
+#define MAX9860_FREQ_NORMAL 0x00
+#define MAX9860_FREQ_12MHZ 0x02
+#define MAX9860_FREQ_13MHZ 0x04
+#define MAX9860_FREQ_19_2MHZ 0x06
+#define MAX9860_16KHZ 0x01
+
+/* AUDIOCLKHIGH */
+#define MAX9860_PLL 0x80
+#define MAX9860_NHI 0x7f
+
+/* AUDIOCLKLOW */
+#define MAX9860_NLO 0xff
+
+/* IFC1A */
+#define MAX9860_MASTER 0x80
+#define MAX9860_WCI 0x40
+#define MAX9860_DBCI 0x20
+#define MAX9860_DDLY 0x10
+#define MAX9860_HIZ 0x08
+#define MAX9860_TDM 0x04
+
+/* IFC1B */
+#define MAX9860_ABCI 0x20
+#define MAX9860_ADLY 0x10
+#define MAX9860_ST 0x08
+#define MAX9860_BSEL 0x07
+#define MAX9860_BSEL_OFF 0x00
+#define MAX9860_BSEL_64X 0x01
+#define MAX9860_BSEL_48X 0x02
+#define MAX9860_BSEL_PCLK_2 0x04
+#define MAX9860_BSEL_PCLK_4 0x05
+#define MAX9860_BSEL_PCLK_8 0x06
+#define MAX9860_BSEL_PCLK_16 0x07
+
+/* VOICEFLTR */
+#define MAX9860_AVFLT 0xf0
+#define MAX9860_AVFLT_SHIFT 4
+#define MAX9860_AVFLT_COUNT 6
+#define MAX9860_DVFLT 0x0f
+#define MAX9860_DVFLT_SHIFT 0
+#define MAX9860_DVFLT_COUNT 6
+
+/* DACATTN */
+#define MAX9860_DVA 0xfe
+#define MAX9860_DVA_SHIFT 1
+#define MAX9860_DVA_MUTE 0x5e
+
+/* ADCLEVEL */
+#define MAX9860_ADCRL 0xf0
+#define MAX9860_ADCRL_SHIFT 4
+#define MAX9860_ADCLL 0x0f
+#define MAX9860_ADCLL_SHIFT 0
+#define MAX9860_ADCxL_MIN 15
+
+/* DACGAIN */
+#define MAX9860_DVG 0x60
+#define MAX9860_DVG_SHIFT 5
+#define MAX9860_DVG_MAX 3
+#define MAX9860_DVST 0x1f
+#define MAX9860_DVST_SHIFT 0
+#define MAX9860_DVST_MIN 31
+
+/* MICGAIN */
+#define MAX9860_PAM 0x60
+#define MAX9860_PAM_SHIFT 5
+#define MAX9860_PAM_MAX 3
+#define MAX9860_PGAM 0x1f
+#define MAX9860_PGAM_SHIFT 0
+#define MAX9860_PGAM_MIN 20
+
+/* MICADC */
+#define MAX9860_AGCSRC 0x80
+#define MAX9860_AGCSRC_SHIFT 7
+#define MAX9860_AGCSRC_COUNT 2
+#define MAX9860_AGCRLS 0x70
+#define MAX9860_AGCRLS_SHIFT 4
+#define MAX9860_AGCRLS_COUNT 8
+#define MAX9860_AGCATK 0x0c
+#define MAX9860_AGCATK_SHIFT 2
+#define MAX9860_AGCATK_COUNT 4
+#define MAX9860_AGCHLD 0x03
+#define MAX9860_AGCHLD_OFF 0x00
+#define MAX9860_AGCHLD_SHIFT 0
+#define MAX9860_AGCHLD_COUNT 4
+
+/* NOISEGATE */
+#define MAX9860_ANTH 0xf0
+#define MAX9860_ANTH_SHIFT 4
+#define MAX9860_ANTH_MAX 15
+#define MAX9860_AGCTH 0x0f
+#define MAX9860_AGCTH_SHIFT 0
+#define MAX9860_AGCTH_MIN 15
+
+/* PWRMAN */
+#define MAX9860_SHDN 0x80
+#define MAX9860_DACEN 0x08
+#define MAX9860_DACEN_SHIFT 3
+#define MAX9860_ADCLEN 0x02
+#define MAX9860_ADCLEN_SHIFT 1
+#define MAX9860_ADCREN 0x01
+#define MAX9860_ADCREN_SHIFT 0
+
+#endif /* _SND_SOC_MAX9860 */
#define MAX9877_BYPASS (1 << 6)
#define MAX9877_SHDN (1 << 7)
-extern int max9877_add_controls(struct snd_soc_codec *codec);
-
#endif
#include <linux/clk.h>
#include <linux/acpi.h>
#include <linux/math64.h>
+#include <linux/semaphore.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "nau8825.h"
+
+#define NUVOTON_CODEC_DAI "nau8825-hifi"
+
#define NAU_FREF_MAX 13500000
-#define NAU_FVCO_MAX 100000000
+#define NAU_FVCO_MAX 124000000
#define NAU_FVCO_MIN 90000000
+/* cross talk suppression detection */
+#define LOG10_MAGIC 646456993
+#define GAIN_AUGMENT 22500
+#define SIDETONE_BASE 207000
+
+static int nau8825_configure_sysclk(struct nau8825 *nau8825,
+ int clk_id, unsigned int freq);
+
struct nau8825_fll {
int mclk_src;
int ratio;
{ NAU8825_REG_CHARGE_PUMP, 0x0 },
};
+/* registers backed up during cross talk detection */
+static struct reg_default nau8825_xtalk_baktab[] = {
+ { NAU8825_REG_ADC_DGAIN_CTRL, 0 },
+ { NAU8825_REG_HSVOL_CTRL, 0 },
+ { NAU8825_REG_DACL_CTRL, 0 },
+ { NAU8825_REG_DACR_CTRL, 0 },
+};
+
+static const unsigned short logtable[256] = {
+ 0x0000, 0x0171, 0x02e0, 0x044e, 0x05ba, 0x0725, 0x088e, 0x09f7,
+ 0x0b5d, 0x0cc3, 0x0e27, 0x0f8a, 0x10eb, 0x124b, 0x13aa, 0x1508,
+ 0x1664, 0x17bf, 0x1919, 0x1a71, 0x1bc8, 0x1d1e, 0x1e73, 0x1fc6,
+ 0x2119, 0x226a, 0x23ba, 0x2508, 0x2656, 0x27a2, 0x28ed, 0x2a37,
+ 0x2b80, 0x2cc8, 0x2e0f, 0x2f54, 0x3098, 0x31dc, 0x331e, 0x345f,
+ 0x359f, 0x36de, 0x381b, 0x3958, 0x3a94, 0x3bce, 0x3d08, 0x3e41,
+ 0x3f78, 0x40af, 0x41e4, 0x4319, 0x444c, 0x457f, 0x46b0, 0x47e1,
+ 0x4910, 0x4a3f, 0x4b6c, 0x4c99, 0x4dc5, 0x4eef, 0x5019, 0x5142,
+ 0x526a, 0x5391, 0x54b7, 0x55dc, 0x5700, 0x5824, 0x5946, 0x5a68,
+ 0x5b89, 0x5ca8, 0x5dc7, 0x5ee5, 0x6003, 0x611f, 0x623a, 0x6355,
+ 0x646f, 0x6588, 0x66a0, 0x67b7, 0x68ce, 0x69e4, 0x6af8, 0x6c0c,
+ 0x6d20, 0x6e32, 0x6f44, 0x7055, 0x7165, 0x7274, 0x7383, 0x7490,
+ 0x759d, 0x76aa, 0x77b5, 0x78c0, 0x79ca, 0x7ad3, 0x7bdb, 0x7ce3,
+ 0x7dea, 0x7ef0, 0x7ff6, 0x80fb, 0x81ff, 0x8302, 0x8405, 0x8507,
+ 0x8608, 0x8709, 0x8809, 0x8908, 0x8a06, 0x8b04, 0x8c01, 0x8cfe,
+ 0x8dfa, 0x8ef5, 0x8fef, 0x90e9, 0x91e2, 0x92db, 0x93d2, 0x94ca,
+ 0x95c0, 0x96b6, 0x97ab, 0x98a0, 0x9994, 0x9a87, 0x9b7a, 0x9c6c,
+ 0x9d5e, 0x9e4f, 0x9f3f, 0xa02e, 0xa11e, 0xa20c, 0xa2fa, 0xa3e7,
+ 0xa4d4, 0xa5c0, 0xa6ab, 0xa796, 0xa881, 0xa96a, 0xaa53, 0xab3c,
+ 0xac24, 0xad0c, 0xadf2, 0xaed9, 0xafbe, 0xb0a4, 0xb188, 0xb26c,
+ 0xb350, 0xb433, 0xb515, 0xb5f7, 0xb6d9, 0xb7ba, 0xb89a, 0xb97a,
+ 0xba59, 0xbb38, 0xbc16, 0xbcf4, 0xbdd1, 0xbead, 0xbf8a, 0xc065,
+ 0xc140, 0xc21b, 0xc2f5, 0xc3cf, 0xc4a8, 0xc580, 0xc658, 0xc730,
+ 0xc807, 0xc8de, 0xc9b4, 0xca8a, 0xcb5f, 0xcc34, 0xcd08, 0xcddc,
+ 0xceaf, 0xcf82, 0xd054, 0xd126, 0xd1f7, 0xd2c8, 0xd399, 0xd469,
+ 0xd538, 0xd607, 0xd6d6, 0xd7a4, 0xd872, 0xd93f, 0xda0c, 0xdad9,
+ 0xdba5, 0xdc70, 0xdd3b, 0xde06, 0xded0, 0xdf9a, 0xe063, 0xe12c,
+ 0xe1f5, 0xe2bd, 0xe385, 0xe44c, 0xe513, 0xe5d9, 0xe69f, 0xe765,
+ 0xe82a, 0xe8ef, 0xe9b3, 0xea77, 0xeb3b, 0xebfe, 0xecc1, 0xed83,
+ 0xee45, 0xef06, 0xefc8, 0xf088, 0xf149, 0xf209, 0xf2c8, 0xf387,
+ 0xf446, 0xf505, 0xf5c3, 0xf680, 0xf73e, 0xf7fb, 0xf8b7, 0xf973,
+ 0xfa2f, 0xfaea, 0xfba5, 0xfc60, 0xfd1a, 0xfdd4, 0xfe8e, 0xff47
+};
+
+static struct snd_soc_dai *nau8825_get_codec_dai(struct nau8825 *nau8825)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(nau8825->dapm);
+ struct snd_soc_component *component = &codec->component;
+ struct snd_soc_dai *codec_dai, *_dai;
+
+ list_for_each_entry_safe(codec_dai, _dai, &component->dai_list, list) {
+ if (!strncmp(codec_dai->name, NUVOTON_CODEC_DAI,
+ strlen(NUVOTON_CODEC_DAI)))
+ return codec_dai;
+ }
+ return NULL;
+}
+
+static bool nau8825_dai_is_active(struct nau8825 *nau8825)
+{
+ struct snd_soc_dai *codec_dai = nau8825_get_codec_dai(nau8825);
+
+ if (codec_dai) {
+ if (codec_dai->playback_active || codec_dai->capture_active)
+ return true;
+ }
+ return false;
+}
+
+/**
+ * nau8825_sema_acquire - acquire the semaphore of nau88l25
+ * @nau8825: component to register the codec private data with
+ * @timeout: how long in jiffies to wait before failure or zero to wait
+ * until release
+ *
+ * Attempts to acquire the semaphore; if it is already held, the calling
+ * task is put to sleep. With a non-zero timeout, the function gives up
+ * and returns once that many jiffies have elapsed without the semaphore
+ * being released; with a zero timeout, it sleeps until the semaphore is
+ * released.
+ */
+static void nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
+{
+ int ret;
+
+ if (timeout)
+ ret = down_timeout(&nau8825->xtalk_sem, timeout);
+ else
+ ret = down_interruptible(&nau8825->xtalk_sem);
+
+ if (ret < 0)
+ dev_warn(nau8825->dev, "Failed to acquire semaphore\n");
+}
+
+/**
+ * nau8825_sema_release - release the semaphore of nau88l25
+ * @nau8825: component to register the codec private data with
+ *
+ * Release the semaphore which may be called from any context and
+ * even by tasks which have never called down().
+ */
+static inline void nau8825_sema_release(struct nau8825 *nau8825)
+{
+ up(&nau8825->xtalk_sem);
+}
+
+/**
+ * nau8825_sema_reset - reset the semaphore for nau88l25
+ * @nau8825: component to register the codec private data with
+ *
+ * Reset the counter of the semaphore. Call this function to restart
+ * a new round task management.
+ */
+static inline void nau8825_sema_reset(struct nau8825 *nau8825)
+{
+ nau8825->xtalk_sem.count = 1;
+}
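
Taken together, these helpers serialize the cross talk sequence against the rest of the driver. A sketch of the intended call pattern; the 100ms timeout is a hypothetical value:

	nau8825_sema_acquire(nau8825, msecs_to_jiffies(100));
	/* ... touch registers shared with the cross talk sequence ... */
	nau8825_sema_release(nau8825);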
+
+/**
+ * Ramp the headphone volume gradually to the target level.
+ *
+ * @nau8825: component to register the codec private data with
+ * @vol_from: the volume to start up
+ * @vol_to: the target volume
+ * @step: the volume span to move on
+ *
+ * The headphone volume ranges from 0dB down to a minimum of -54dB, in
+ * -1dB steps. If the volume changes abruptly, a pop noise is heard in
+ * the headphone, so this function ramps the volume up or down, delaying
+ * 10ms per step.
+ */
+static void nau8825_hpvol_ramp(struct nau8825 *nau8825,
+ unsigned int vol_from, unsigned int vol_to, unsigned int step)
+{
+ unsigned int value, volume, ramp_up, from, to;
+
+ if (vol_from == vol_to || step == 0) {
+ return;
+ } else if (vol_from < vol_to) {
+ ramp_up = true;
+ from = vol_from;
+ to = vol_to;
+ } else {
+ ramp_up = false;
+ from = vol_to;
+ to = vol_from;
+ }
+ /* only handle volume from 0dB to minimum -54dB */
+ if (to > NAU8825_HP_VOL_MIN)
+ to = NAU8825_HP_VOL_MIN;
+
+ for (volume = from; volume < to; volume += step) {
+ if (ramp_up)
+ value = volume;
+ else
+ value = to - volume + from;
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_HSVOL_CTRL,
+ NAU8825_HPL_VOL_MASK | NAU8825_HPR_VOL_MASK,
+ (value << NAU8825_HPL_VOL_SFT) | value);
+ usleep_range(10000, 10500);
+ }
+ if (ramp_up)
+ value = to;
+ else
+ value = from;
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_HSVOL_CTRL,
+ NAU8825_HPL_VOL_MASK | NAU8825_HPR_VOL_MASK,
+ (value << NAU8825_HPL_VOL_SFT) | value);
+}
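
The cross talk paths further down use exactly this: the prepare step ramps the stored volume to 0dB before measurement and the restore step ramps it back afterwards, for example:

	nau8825_hpvol_ramp(nau8825, volume, 0, 3);	/* prepare: up to 0dB */
	nau8825_hpvol_ramp(nau8825, 0, volume, 3);	/* restore: back again */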
+
+/**
+ * Compute log10 of a value; the result is rounded off to 3 decimal places.
+ * This function is adapted from dvb-math; the original source is
+ * Linux/drivers/media/dvb-core/dvb_math.c.
+ *
+ * return log10(value) * 1000
+ */
+static u32 nau8825_intlog10_dec3(u32 value)
+{
+ u32 msb, logentry, significand, interpolation, log10val;
+ u64 log2val;
+
+ /* first detect the msb (count begins at 0) */
+ msb = fls(value) - 1;
+ /**
+ * now we use a logtable after the following method:
+ *
+ * log2(2^x * y) * 2^24 = x * 2^24 + log2(y) * 2^24
+ * where x = msb and therefore 1 <= y < 2
+ * first y is determined by shifting the value left
+ * so that msb is bit 31
+ * 0x00231f56 -> 0x8C7D5800
+ * the result is y * 2^31 -> "significand"
+ * then the highest 9 bits are used for a table lookup
+ * the highest bit is discarded because it's always set
+ * the highest nine bits in our example are 100011000
+ * so we would use the entry 0x18
+ */
+ significand = value << (31 - msb);
+ logentry = (significand >> 23) & 0xff;
+ /**
+ * last step we do is interpolation because of the
+ * limitations of the log table the error is that part of
+ * the significand which isn't used for lookup then we
+ * compute the ratio between the error and the next table entry
+ * and interpolate it between the log table entry used and the
+ * next one the biggest error possible is 0x7fffff
+ * (in our example it's 0x7D5800)
+ * needed value for next table entry is 0x800000
+ * so the interpolation is
+ * (error / 0x800000) * (logtable_next - logtable_current)
+ * in the implementation the division is moved to the end for
+ * better accuracy there is also an overflow correction if
+ * logtable_next is 256
+ */
+ interpolation = ((significand & 0x7fffff) *
+ ((logtable[(logentry + 1) & 0xff] -
+ logtable[logentry]) & 0xffff)) >> 15;
+
+ log2val = ((msb << 24) + (logtable[logentry] << 8) + interpolation);
+ /**
+ * log10(x) = log2(x) * log10(2)
+ */
+ log10val = (log2val * LOG10_MAGIC) >> 31;
+ /**
+ * round the result off to 3 decimal places
+ */
+ return log10val / ((1 << 24) / 1000);
+}
+
+/**
+ * Compute the cross talk suppression sidetone gain.
+ *
+ * @sig_org: original signal level
+ * @sig_cros: cross talk signal level
+ *
+ * The original and cross talk signal values need to be characterized.
+ * Once they have been characterized, the sidetone value can be
+ * converted to decibels with the equation below.
+ * sidetone = 20 * log (original signal level / crosstalk signal level)
+ *
+ * return cross talk sidetone gain
+ */
+static u32 nau8825_xtalk_sidetone(u32 sig_org, u32 sig_cros)
+{
+ u32 gain, sidetone;
+
+ if (unlikely(sig_org == 0) || unlikely(sig_cros == 0)) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ sig_org = nau8825_intlog10_dec3(sig_org);
+ sig_cros = nau8825_intlog10_dec3(sig_cros);
+ if (sig_org >= sig_cros)
+ gain = (sig_org - sig_cros) * 20 + GAIN_AUGMENT;
+ else
+ gain = (sig_cros - sig_org) * 20 + GAIN_AUGMENT;
+ sidetone = SIDETONE_BASE - gain * 2;
+ sidetone /= 1000;
+
+ return sidetone;
+}
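
LOG10_MAGIC above is round(log10(2) * 2^31), which is how the log2 fixed-point value becomes log10. Two sanity checks for the helper, as a sketch only:

	u32 a = nau8825_intlog10_dec3(100);	/* ~2000, log10(100) = 2 */
	u32 b = nau8825_intlog10_dec3(1000);	/* ~3000, log10(1000) = 3 */

nau8825_xtalk_sidetone() then works on these scaled values, so 20 * (log10(org) - log10(cross)) recovers the dB ratio between the original and cross talk levels.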
+
+static int nau8825_xtalk_baktab_index_by_reg(unsigned int reg)
+{
+ int index;
+
+ for (index = 0; index < ARRAY_SIZE(nau8825_xtalk_baktab); index++)
+ if (nau8825_xtalk_baktab[index].reg == reg)
+ return index;
+ return -EINVAL;
+}
+
+static void nau8825_xtalk_backup(struct nau8825 *nau8825)
+{
+ int i;
+
+ /* Back up some register values to the backup table */
+ for (i = 0; i < ARRAY_SIZE(nau8825_xtalk_baktab); i++)
+ regmap_read(nau8825->regmap, nau8825_xtalk_baktab[i].reg,
+ &nau8825_xtalk_baktab[i].def);
+}
+
+static void nau8825_xtalk_restore(struct nau8825 *nau8825)
+{
+ int i, volume;
+
+ /* Restore register values from the backup table; when the driver
+ * restores the headphone volume, it ramps back to the original level
+ * gradually, 3dB per step, to reduce pop noise.
+ */
+ for (i = 0; i < ARRAY_SIZE(nau8825_xtalk_baktab); i++) {
+ if (nau8825_xtalk_baktab[i].reg == NAU8825_REG_HSVOL_CTRL) {
+ /* Ramp the volume up gradually to reduce pop noise */
+ volume = nau8825_xtalk_baktab[i].def &
+ NAU8825_HPR_VOL_MASK;
+ nau8825_hpvol_ramp(nau8825, 0, volume, 3);
+ continue;
+ }
+ regmap_write(nau8825->regmap, nau8825_xtalk_baktab[i].reg,
+ nau8825_xtalk_baktab[i].def);
+ }
+}
+
+static void nau8825_xtalk_prepare_dac(struct nau8825 *nau8825)
+{
+ /* Enable power of DAC path */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL,
+ NAU8825_ENABLE_DACR | NAU8825_ENABLE_DACL |
+ NAU8825_ENABLE_ADC | NAU8825_ENABLE_ADC_CLK |
+ NAU8825_ENABLE_DAC_CLK, NAU8825_ENABLE_DACR |
+ NAU8825_ENABLE_DACL | NAU8825_ENABLE_ADC |
+ NAU8825_ENABLE_ADC_CLK | NAU8825_ENABLE_DAC_CLK);
+ /* Prevent startup click by letting the charge pump ramp up and by
+ * enabling charge bump
+ */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CHARGE_PUMP,
+ NAU8825_JAMNODCLOW | NAU8825_CHANRGE_PUMP_EN,
+ NAU8825_JAMNODCLOW | NAU8825_CHANRGE_PUMP_EN);
+ /* Enable clock sync of DAC and DAC clock */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_RDAC,
+ NAU8825_RDAC_EN | NAU8825_RDAC_CLK_EN |
+ NAU8825_RDAC_FS_BCLK_ENB,
+ NAU8825_RDAC_EN | NAU8825_RDAC_CLK_EN);
+ /* Power up output driver with 2 stage */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_POWER_UP_CONTROL,
+ NAU8825_POWERUP_INTEGR_R | NAU8825_POWERUP_INTEGR_L |
+ NAU8825_POWERUP_DRV_IN_R | NAU8825_POWERUP_DRV_IN_L,
+ NAU8825_POWERUP_INTEGR_R | NAU8825_POWERUP_INTEGR_L |
+ NAU8825_POWERUP_DRV_IN_R | NAU8825_POWERUP_DRV_IN_L);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_POWER_UP_CONTROL,
+ NAU8825_POWERUP_HP_DRV_R | NAU8825_POWERUP_HP_DRV_L,
+ NAU8825_POWERUP_HP_DRV_R | NAU8825_POWERUP_HP_DRV_L);
+ /* HP outputs not shorted to ground */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_HSD_CTRL,
+ NAU8825_SPKR_DWN1R | NAU8825_SPKR_DWN1L, 0);
+ /* Enable HP boost driver */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_BOOST,
+ NAU8825_HP_BOOST_DIS, NAU8825_HP_BOOST_DIS);
+ /* Enable class G compare path to supply 1.8V or 0.9V. */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CLASSG_CTRL,
+ NAU8825_CLASSG_LDAC_EN | NAU8825_CLASSG_RDAC_EN,
+ NAU8825_CLASSG_LDAC_EN | NAU8825_CLASSG_RDAC_EN);
+}
+
+static void nau8825_xtalk_prepare_adc(struct nau8825 *nau8825)
+{
+ /* Power up left ADC and raise Vref 0.5dB above Vmid */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ANALOG_ADC_2,
+ NAU8825_POWERUP_ADCL | NAU8825_ADC_VREFSEL_MASK,
+ NAU8825_POWERUP_ADCL | NAU8825_ADC_VREFSEL_VMID_PLUS_0_5DB);
+}
+
+static void nau8825_xtalk_clock(struct nau8825 *nau8825)
+{
+ /* Recover FLL default value */
+ regmap_write(nau8825->regmap, NAU8825_REG_FLL1, 0x0);
+ regmap_write(nau8825->regmap, NAU8825_REG_FLL2, 0x3126);
+ regmap_write(nau8825->regmap, NAU8825_REG_FLL3, 0x0008);
+ regmap_write(nau8825->regmap, NAU8825_REG_FLL4, 0x0010);
+ regmap_write(nau8825->regmap, NAU8825_REG_FLL5, 0x0);
+ regmap_write(nau8825->regmap, NAU8825_REG_FLL6, 0x6000);
+ /* Enable the internal VCO clock to generate the detection signal */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
+ NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_VCO);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL6, NAU8825_DCO_EN,
+ NAU8825_DCO_EN);
+ /* Use a specific internal clock frequency to generate the
+ * detection signal.
+ */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
+ NAU8825_CLK_MCLK_SRC_MASK, 0xf);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL1,
+ NAU8825_FLL_RATIO_MASK, 0x10);
+}
+
+static void nau8825_xtalk_prepare(struct nau8825 *nau8825)
+{
+ int volume, index;
+
+ /* Back up the registers changed by cross talk detection */
+ nau8825_xtalk_backup(nau8825);
+ /* Configure IIS as master so the codec outputs the signal */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK |
+ NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_MASTER |
+ (0x2 << NAU8825_I2S_DRV_SFT) | 0x1);
+ /* Ramp the headphone volume up to 0dB for better performance and
+ * to avoid pop noise in the headphone.
+ */
+ index = nau8825_xtalk_baktab_index_by_reg(NAU8825_REG_HSVOL_CTRL);
+ if (index != -EINVAL) {
+ volume = nau8825_xtalk_baktab[index].def &
+ NAU8825_HPR_VOL_MASK;
+ nau8825_hpvol_ramp(nau8825, volume, 0, 3);
+ }
+ nau8825_xtalk_clock(nau8825);
+ nau8825_xtalk_prepare_dac(nau8825);
+ nau8825_xtalk_prepare_adc(nau8825);
+ /* Config channel path and digital gain */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_DACL_CTRL,
+ NAU8825_DACL_CH_SEL_MASK | NAU8825_DACL_CH_VOL_MASK,
+ NAU8825_DACL_CH_SEL_L | 0xab);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_DACR_CTRL,
+ NAU8825_DACR_CH_SEL_MASK | NAU8825_DACR_CH_VOL_MASK,
+ NAU8825_DACR_CH_SEL_R | 0xab);
+ /* Configure the cross talk parameters and generate a 23Hz sine wave
+ * at 1/16 of full-scale signal level for the impedance measurement.
+ */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_IMM_MODE_CTRL,
+ NAU8825_IMM_THD_MASK | NAU8825_IMM_GEN_VOL_MASK |
+ NAU8825_IMM_CYC_MASK | NAU8825_IMM_DAC_SRC_MASK,
+ (0x9 << NAU8825_IMM_THD_SFT) | NAU8825_IMM_GEN_VOL_1_16th |
+ NAU8825_IMM_CYC_8192 | NAU8825_IMM_DAC_SRC_SIN);
+ /* Enable the RMS interrupt */
+ regmap_update_bits(nau8825->regmap,
+ NAU8825_REG_INTERRUPT_MASK, NAU8825_IRQ_RMS_EN, 0);
+ /* Power up left and right DAC */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CHARGE_PUMP,
+ NAU8825_POWER_DOWN_DACR | NAU8825_POWER_DOWN_DACL, 0);
+}
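+
+/* Editorial sketch, not part of the patch: nau8825_hpvol_ramp() (defined
+ * earlier in this series) is called above as (from, to, step). Assuming
+ * the NAU8825_HPL/R_VOL fields step in equal units and the last argument
+ * is a per-step delay, a hypothetical ramp helper could look like this;
+ * __maybe_unused keeps the sketch from warning if built.
+ */
+static void __maybe_unused nau8825_hpvol_ramp_sketch(struct nau8825 *nau8825,
+ unsigned int vol_from, unsigned int vol_to, unsigned int step_ms)
+{
+ unsigned int vol = vol_from;
+
+ while (vol != vol_to) {
+ vol = (vol < vol_to) ? vol + 1 : vol - 1;
+ /* Write the same level to both the L and R volume fields */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_HSVOL_CTRL,
+ NAU8825_HPL_VOL_MASK | NAU8825_HPR_VOL_MASK,
+ (vol << NAU8825_HPL_VOL_SFT) | vol);
+ msleep(step_ms);
+ }
+}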
+
+static void nau8825_xtalk_clean_dac(struct nau8825 *nau8825)
+{
+ /* Disable HP boost driver */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_BOOST,
+ NAU8825_HP_BOOST_DIS, 0);
+ /* HP outputs shorted to ground */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_HSD_CTRL,
+ NAU8825_SPKR_DWN1R | NAU8825_SPKR_DWN1L,
+ NAU8825_SPKR_DWN1R | NAU8825_SPKR_DWN1L);
+ /* Power down left and right DAC */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CHARGE_PUMP,
+ NAU8825_POWER_DOWN_DACR | NAU8825_POWER_DOWN_DACL,
+ NAU8825_POWER_DOWN_DACR | NAU8825_POWER_DOWN_DACL);
+ /* Enable the TESTDAC and disable L/R HP impedance */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_BIAS_ADJ,
+ NAU8825_BIAS_HPR_IMP | NAU8825_BIAS_HPL_IMP |
+ NAU8825_BIAS_TESTDAC_EN, NAU8825_BIAS_TESTDAC_EN);
+ /* Power down the output driver in two stages */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_POWER_UP_CONTROL,
+ NAU8825_POWERUP_HP_DRV_R | NAU8825_POWERUP_HP_DRV_L, 0);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_POWER_UP_CONTROL,
+ NAU8825_POWERUP_INTEGR_R | NAU8825_POWERUP_INTEGR_L |
+ NAU8825_POWERUP_DRV_IN_R | NAU8825_POWERUP_DRV_IN_L, 0);
+ /* Disable the DAC clock sync and the DAC clock */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_RDAC,
+ NAU8825_RDAC_EN | NAU8825_RDAC_CLK_EN, 0);
+ /* Disable the charge pump ramp-up function and the charge pump */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CHARGE_PUMP,
+ NAU8825_JAMNODCLOW | NAU8825_CHANRGE_PUMP_EN, 0);
+ /* Disable power of DAC path */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL,
+ NAU8825_ENABLE_DACR | NAU8825_ENABLE_DACL |
+ NAU8825_ENABLE_ADC_CLK | NAU8825_ENABLE_DAC_CLK, 0);
+ if (!nau8825->irq)
+ regmap_update_bits(nau8825->regmap,
+ NAU8825_REG_ENA_CTRL, NAU8825_ENABLE_ADC, 0);
+}
+
+static void nau8825_xtalk_clean_adc(struct nau8825 *nau8825)
+{
+ /* Power down left ADC and restore voltage to Vmid */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ANALOG_ADC_2,
+ NAU8825_POWERUP_ADCL | NAU8825_ADC_VREFSEL_MASK, 0);
+}
+
+static void nau8825_xtalk_clean(struct nau8825 *nau8825)
+{
+ /* Enable the internal VCO needed for interrupts */
+ nau8825_configure_sysclk(nau8825, NAU8825_CLK_INTERNAL, 0);
+ nau8825_xtalk_clean_dac(nau8825);
+ nau8825_xtalk_clean_adc(nau8825);
+ /* Clear the cross talk parameters and disable the function */
+ regmap_write(nau8825->regmap, NAU8825_REG_IMM_MODE_CTRL, 0);
+ /* Disable the RMS interrupt */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_INTERRUPT_MASK,
+ NAU8825_IRQ_RMS_EN, NAU8825_IRQ_RMS_EN);
+ /* Restore the default I2S configuration */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK |
+ NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_SLAVE);
+ /* Restore the registers changed for cross talk detection */
+ nau8825_xtalk_restore(nau8825);
+}
+
+static void nau8825_xtalk_imm_start(struct nau8825 *nau8825, int vol)
+{
+ /* Apply ADC volume for better cross talk performance */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ADC_DGAIN_CTRL,
+ NAU8825_ADC_DIG_VOL_MASK, vol);
+ /* Disable the JKTIP(HPL) DAC channel for the right-to-left
+ * measurement. Do this before sending the signal in order to avoid
+ * pop noise.
+ */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_BIAS_ADJ,
+ NAU8825_BIAS_TESTDACR_EN | NAU8825_BIAS_TESTDACL_EN,
+ NAU8825_BIAS_TESTDACL_EN);
+ switch (nau8825->xtalk_state) {
+ case NAU8825_XTALK_HPR_R2L:
+ /* Enable right headphone impedance */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_BIAS_ADJ,
+ NAU8825_BIAS_HPR_IMP | NAU8825_BIAS_HPL_IMP,
+ NAU8825_BIAS_HPR_IMP);
+ break;
+ case NAU8825_XTALK_HPL_R2L:
+ /* Enable left headphone impedance */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_BIAS_ADJ,
+ NAU8825_BIAS_HPR_IMP | NAU8825_BIAS_HPL_IMP,
+ NAU8825_BIAS_HPL_IMP);
+ break;
+ default:
+ break;
+ }
+ msleep(100);
+ /* Impedance measurement mode enable */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_IMM_MODE_CTRL,
+ NAU8825_IMM_EN, NAU8825_IMM_EN);
+}
+
+static void nau8825_xtalk_imm_stop(struct nau8825 *nau8825)
+{
+ /* Impedance measurement mode disable */
+ regmap_update_bits(nau8825->regmap,
+ NAU8825_REG_IMM_MODE_CTRL, NAU8825_IMM_EN, 0);
+}
+
+/* The cross talk measurement function reduces cross talk across the
+ * JKTIP(HPL) and JKR1(HPR) outputs by measuring the cross talk signal
+ * level to determine the cross talk reduction gain. The system works by
+ * sending a 23Hz -24dBV sine wave into the headset output DAC and through
+ * the PGA. The output of the PGA is then connected to an internal current
+ * sense which measures the attenuated 23Hz signal and passes the output to
+ * an ADC which converts the measurement to a binary code. With two separate
+ * measurements, one for JKR1(HPR) and the other for JKTIP(HPL), the
+ * measurement data can be read from IMM_RMS_L for HSR and HSL after each
+ * measurement. Thus, the measurement function has four states to complete
+ * the whole sequence.
+ * 1. Prepare state : Prepare the resources for detection and transfer to
+ * the HPR IMM state to measure the JKR1(HPR) impedance.
+ * 2. HPR IMM state : Read out the original signal level of JKR1(HPR) and
+ * transfer to the HPL IMM state to measure the JKTIP(HPL) impedance.
+ * 3. HPL IMM state : Read out the cross talk signal level of JKTIP(HPL) and
+ * transfer to the IMM state to determine the suppression sidetone gain.
+ * 4. IMM state : Compute the cross talk suppression sidetone gain from the
+ * original and cross talk signal levels. Apply this gain, restore the
+ * codec configuration, and transfer to the Done state to finish.
+ */
+static void nau8825_xtalk_measure(struct nau8825 *nau8825)
+{
+ u32 sidetone;
+
+ switch (nau8825->xtalk_state) {
+ case NAU8825_XTALK_PREPARE:
+ /* In the prepare state, set up the clock, interrupt, DAC path, ADC
+ * path and cross talk detection parameters.
+ */
+ nau8825_xtalk_prepare(nau8825);
+ msleep(280);
+ /* Trigger right headphone impedance detection */
+ nau8825->xtalk_state = NAU8825_XTALK_HPR_R2L;
+ nau8825_xtalk_imm_start(nau8825, 0x00d2);
+ break;
+ case NAU8825_XTALK_HPR_R2L:
+ /* In the right headphone IMM state, read out the right headphone
+ * impedance measurement result, and then start the left side.
+ */
+ regmap_read(nau8825->regmap, NAU8825_REG_IMM_RMS_L,
+ &nau8825->imp_rms[NAU8825_XTALK_HPR_R2L]);
+ dev_dbg(nau8825->dev, "HPR_R2L imm: %x\n",
+ nau8825->imp_rms[NAU8825_XTALK_HPR_R2L]);
+ /* Disable then re-enable IMM mode to update */
+ nau8825_xtalk_imm_stop(nau8825);
+ /* Trigger left headphone impedance detection */
+ nau8825->xtalk_state = NAU8825_XTALK_HPL_R2L;
+ nau8825_xtalk_imm_start(nau8825, 0x00ff);
+ break;
+ case NAU8825_XTALK_HPL_R2L:
+ /* In the left headphone IMM state, read out the left headphone
+ * impedance measurement result, and delay for some time until the
+ * detection sine wave output finishes. Then, we can calculate the
+ * cross talk suppression sidetone according to the L/R headphone
+ * impedance.
+ */
+ regmap_read(nau8825->regmap, NAU8825_REG_IMM_RMS_L,
+ &nau8825->imp_rms[NAU8825_XTALK_HPL_R2L]);
+ dev_dbg(nau8825->dev, "HPL_R2L imm: %x\n",
+ nau8825->imp_rms[NAU8825_XTALK_HPL_R2L]);
+ nau8825_xtalk_imm_stop(nau8825);
+ msleep(150);
+ nau8825->xtalk_state = NAU8825_XTALK_IMM;
+ break;
+ case NAU8825_XTALK_IMM:
+ /* In the impedance measurement state, the original and cross talk
+ * signal level values are ready. The sidetone gain is determined
+ * from these signal levels. Finally, the codec configuration is
+ * restored.
+ */
+ sidetone = nau8825_xtalk_sidetone(
+ nau8825->imp_rms[NAU8825_XTALK_HPR_R2L],
+ nau8825->imp_rms[NAU8825_XTALK_HPL_R2L]);
+ dev_dbg(nau8825->dev, "cross talk sidetone: %x\n", sidetone);
+ regmap_write(nau8825->regmap, NAU8825_REG_DAC_DGAIN_CTRL,
+ (sidetone << 8) | sidetone);
+ nau8825_xtalk_clean(nau8825);
+ nau8825->xtalk_state = NAU8825_XTALK_DONE;
+ break;
+ default:
+ break;
+ }
+}
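+
+/* Editorial sketch, not the driver's actual math: nau8825_xtalk_sidetone()
+ * (defined earlier in this series) turns the two RMS readings into the
+ * suppression sidetone gain. Conceptually the gain tracks how far the
+ * cross talk level sits below the original level; a naive 6dB-per-octave
+ * approximation under that assumption:
+ */
+static u32 __maybe_unused nau8825_xtalk_sidetone_sketch(u32 sig_org,
+ u32 sig_cros)
+{
+ u32 atten_db = 0;
+
+ /* Each doubling of sig_cros toward sig_org is roughly 6dB */
+ while (sig_cros && (sig_cros << 1) <= sig_org) {
+ sig_cros <<= 1;
+ atten_db += 6;
+ }
+ return atten_db;
+}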
+
+static void nau8825_xtalk_work(struct work_struct *work)
+{
+ struct nau8825 *nau8825 = container_of(
+ work, struct nau8825, xtalk_work);
+
+ nau8825_xtalk_measure(nau8825);
+ /* Determine the cross talk sidetone gain once the impedance
+ * measurement state is reached.
+ */
+ if (nau8825->xtalk_state == NAU8825_XTALK_IMM)
+ nau8825_xtalk_measure(nau8825);
+
+ /* Delay the jack report until the cross talk detection process has
+ * completed. This prevents an application from starting playback
+ * preparation while cross talk detection is still working.
+ * Meanwhile, the protection for the cross talk detection
+ * is released.
+ */
+ if (nau8825->xtalk_state == NAU8825_XTALK_DONE) {
+ snd_soc_jack_report(nau8825->jack, nau8825->xtalk_event,
+ nau8825->xtalk_event_mask);
+ nau8825_sema_release(nau8825);
+ nau8825->xtalk_protect = false;
+ }
+}
+
+static void nau8825_xtalk_cancel(struct nau8825 *nau8825)
+{
+ /* If xtalk_protect is true, the process is still ongoing. The
+ * driver forcibly cancels the cross talk task and restores the
+ * configuration to its original state.
+ */
+ if (nau8825->xtalk_protect) {
+ cancel_work_sync(&nau8825->xtalk_work);
+ nau8825_xtalk_clean(nau8825);
+ }
+ /* Reset parameters for cross talk suppression function */
+ nau8825_sema_reset(nau8825);
+ nau8825->xtalk_state = NAU8825_XTALK_DONE;
+ nau8825->xtalk_protect = false;
+}
+
static bool nau8825_readable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case NAU8825_REG_SARDOUT_RAM_STATUS:
case NAU8825_REG_CHARGE_PUMP_INPUT_READ:
case NAU8825_REG_GENERAL_STATUS:
+ case NAU8825_REG_BIQ_CTRL ... NAU8825_REG_BIQ_COF10:
return true;
default:
return false;
}
}
+static int nau8825_adc_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL,
+ NAU8825_ENABLE_ADC, NAU8825_ENABLE_ADC);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (!nau8825->irq)
+ regmap_update_bits(nau8825->regmap,
+ NAU8825_REG_ENA_CTRL, NAU8825_ENABLE_ADC, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nau8825_pump_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
return 0;
}
+static int nau8825_biq_coeff_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+
+ if (!component->regmap)
+ return -EINVAL;
+
+ regmap_raw_read(component->regmap, NAU8825_REG_BIQ_COF1,
+ ucontrol->value.bytes.data, params->max);
+ return 0;
+}
+
+static int nau8825_biq_coeff_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+ void *data;
+
+ if (!component->regmap)
+ return -EINVAL;
+
+ data = kmemdup(ucontrol->value.bytes.data,
+ params->max, GFP_KERNEL | GFP_DMA);
+ if (!data)
+ return -ENOMEM;
+
+ regmap_update_bits(component->regmap, NAU8825_REG_BIQ_CTRL,
+ NAU8825_BIQ_WRT_EN, 0);
+ regmap_raw_write(component->regmap, NAU8825_REG_BIQ_COF1,
+ data, params->max);
+ regmap_update_bits(component->regmap, NAU8825_REG_BIQ_CTRL,
+ NAU8825_BIQ_WRT_EN, NAU8825_BIQ_WRT_EN);
+
+ kfree(data);
+ return 0;
+}
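+
+/* Layout note (editorial assumption): the 20-byte "BIQ Coefficients"
+ * control below spans the ten 16-bit registers NAU8825_REG_BIQ_COF1 to
+ * NAU8825_REG_BIQ_COF10, so byte pair [0..1] lands in BIQ_COF1, [2..3]
+ * in BIQ_COF2, and so on, matching the regmap_raw_read() and
+ * regmap_raw_write() calls above.
+ */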
+
+static const char * const nau8825_biq_path[] = {
+ "ADC", "DAC"
+};
+
+static const struct soc_enum nau8825_biq_path_enum =
+ SOC_ENUM_SINGLE(NAU8825_REG_BIQ_CTRL, NAU8825_BIQ_PATH_SFT,
+ ARRAY_SIZE(nau8825_biq_path), nau8825_biq_path);
+
static const char * const nau8825_adc_decimation[] = {
"32", "64", "128", "256"
};
SOC_ENUM("ADC Decimation Rate", nau8825_adc_decimation_enum),
SOC_ENUM("DAC Oversampling Rate", nau8825_dac_oversampl_enum),
+ /* programmable biquad filter */
+ SOC_ENUM("BIQ Path Select", nau8825_biq_path_enum),
+ SND_SOC_BYTES_EXT("BIQ Coefficients", 20,
+ nau8825_biq_coeff_get, nau8825_biq_coeff_put),
};
/* DAC Mux 0x33[9] and 0x34[9] */
SND_SOC_DAPM_PGA("Frontend PGA", NAU8825_REG_POWER_UP_CONTROL, 14, 0,
NULL, 0),
- SND_SOC_DAPM_ADC("ADC", NULL, NAU8825_REG_ENA_CTRL, 8, 0),
+ SND_SOC_DAPM_ADC_E("ADC", NULL, SND_SOC_NOPM, 0, 0,
+ nau8825_adc_event, SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("ADC Clock", NAU8825_REG_ENA_CTRL, 7, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL,
0),
NAU8825_HSD_AUTO_MODE | NAU8825_SPKR_DWN1R | NAU8825_SPKR_DWN1L,
NAU8825_HSD_AUTO_MODE | NAU8825_SPKR_DWN1R | NAU8825_SPKR_DWN1L);
- regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_MASK,
- NAU8825_IRQ_HEADSET_COMPLETE_EN | NAU8825_IRQ_EJECT_EN, 0);
-
return 0;
}
EXPORT_SYMBOL_GPL(nau8825_enable_jack_detect);
static bool nau8825_is_jack_inserted(struct regmap *regmap)
{
- int status;
+ bool active_high, is_high;
+ int status, jkdet;
+ regmap_read(regmap, NAU8825_REG_JACK_DET_CTRL, &jkdet);
+ active_high = jkdet & NAU8825_JACK_POLARITY;
regmap_read(regmap, NAU8825_REG_I2C_DEVICE_ID, &status);
- return !(status & NAU8825_GPIO2JD1);
+ is_high = status & NAU8825_GPIO2JD1;
+ /* return jack connection status according to jack insertion logic
+ * active high or active low.
+ */
+ return active_high == is_high;
}
static void nau8825_restart_jack_detection(struct regmap *regmap)
{
- /* Chip needs one FSCLK cycle in order to generate interrupts,
- * as we cannot guarantee one will be provided by the system. Turning
- * master mode on then off enables us to generate that FSCLK cycle
- * with a minimum of contention on the clock bus.
- */
- regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
- regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
-
/* this will restart the entire jack detection process including MIC/GND
* switching and create interrupts. We have to go from 0 to 1 and back
* to 0 to restart.
NAU8825_JACK_DET_RESTART, 0);
}
+static void nau8825_int_status_clear_all(struct regmap *regmap)
+{
+ int active_irq, clear_irq, i;
+
+ /* Clear the interrupt status from the rightmost bit upward for each
+ * IRQ event that has occurred.
+ */
+ regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq);
+ for (i = 0; i < NAU8825_REG_DATA_LEN; i++) {
+ clear_irq = (0x1 << i);
+ if (active_irq & clear_irq)
+ regmap_write(regmap,
+ NAU8825_REG_INT_CLR_KEY_STATUS, clear_irq);
+ }
+}
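+
+/* Editorial sketch: the loop above probes every bit position up to
+ * NAU8825_REG_DATA_LEN. An equivalent form that visits only the set
+ * bits, using the lowest-set-bit idiom, would be:
+ *
+ * while (active_irq) {
+ * int bit = active_irq & -active_irq;
+ *
+ * regmap_write(regmap, NAU8825_REG_INT_CLR_KEY_STATUS, bit);
+ * active_irq &= ~bit;
+ * }
+ */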
+
static void nau8825_eject_jack(struct nau8825 *nau8825)
{
struct snd_soc_dapm_context *dapm = nau8825->dapm;
struct regmap *regmap = nau8825->regmap;
+ /* Force to cancel the cross talk detection process */
+ nau8825_xtalk_cancel(nau8825);
+
snd_soc_dapm_disable_pin(dapm, "SAR");
snd_soc_dapm_disable_pin(dapm, "MICBIAS");
/* Detach 2kOhm Resistors from MICBIAS to MICGND1/2 */
regmap_update_bits(regmap, NAU8825_REG_HSD_CTRL, 0xf, 0xf);
snd_soc_dapm_sync(dapm);
+
+ /* Clear all interruption status */
+ nau8825_int_status_clear_all(regmap);
+
+ /* Enable the insertion interrupt, disable the ejection interrupt,
+ * and then bypass the de-bounce circuit.
+ */
+ regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_DIS_CTRL,
+ NAU8825_IRQ_EJECT_DIS | NAU8825_IRQ_INSERT_DIS,
+ NAU8825_IRQ_EJECT_DIS);
+ regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_MASK,
+ NAU8825_IRQ_OUTPUT_EN | NAU8825_IRQ_EJECT_EN |
+ NAU8825_IRQ_HEADSET_COMPLETE_EN | NAU8825_IRQ_INSERT_EN,
+ NAU8825_IRQ_OUTPUT_EN | NAU8825_IRQ_EJECT_EN |
+ NAU8825_IRQ_HEADSET_COMPLETE_EN);
+ regmap_update_bits(regmap, NAU8825_REG_JACK_DET_CTRL,
+ NAU8825_JACK_DET_DB_BYPASS, NAU8825_JACK_DET_DB_BYPASS);
+
+ /* Disable the ADC needed for interrupts in auto mode */
+ regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
+ NAU8825_ENABLE_ADC, 0);
+
+ /* Turn off the clock for jack type detection in manual mode */
+ nau8825_configure_sysclk(nau8825, NAU8825_CLK_DIS, 0);
+}
+
+/* Enable auto mode interrupts with the internal clock. */
+static void nau8825_setup_auto_irq(struct nau8825 *nau8825)
+{
+ struct regmap *regmap = nau8825->regmap;
+
+ /* Enable the headset jack type detection complete interrupt and
+ * the jack ejection interrupt.
+ */
+ regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_MASK,
+ NAU8825_IRQ_HEADSET_COMPLETE_EN | NAU8825_IRQ_EJECT_EN, 0);
+
+ /* Enable the internal VCO needed for interrupts */
+ nau8825_configure_sysclk(nau8825, NAU8825_CLK_INTERNAL, 0);
+
+ /* Enable the ADC needed for interrupts */
+ regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
+ NAU8825_ENABLE_ADC, NAU8825_ENABLE_ADC);
+
+ /* The chip needs one FSCLK cycle in order to generate interrupts,
+ * as we cannot guarantee one will be provided by the system. Turning
+ * master mode on then off enables us to generate that FSCLK cycle
+ * with a minimum of contention on the clock bus.
+ */
+ regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
+ regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
+
+ /* Do not bypass the de-bounce circuit */
+ regmap_update_bits(regmap, NAU8825_REG_JACK_DET_CTRL,
+ NAU8825_JACK_DET_DB_BYPASS, 0);
+
+ /* Unmask all interrupts */
+ regmap_write(regmap, NAU8825_REG_INTERRUPT_DIS_CTRL, 0);
+
+ /* Restart the jack detection process in auto mode */
+ nau8825_restart_jack_detection(regmap);
}
static int nau8825_button_decode(int value)
regmap_read(regmap, NAU8825_REG_GENERAL_STATUS, &jack_status_reg);
mic_detected = (jack_status_reg >> 10) & 3;
+ /* JKSLV and JKR2 both detected indicates a high impedance headset */
+ if (mic_detected == 0x3)
+ nau8825->high_imped = true;
+ else
+ nau8825->high_imped = false;
switch (mic_detected) {
case 0:
} else if (active_irq & NAU8825_HEADSET_COMPLETION_IRQ) {
if (nau8825_is_jack_inserted(regmap)) {
event |= nau8825_jack_insert(nau8825);
+ if (!nau8825->high_imped) {
+ /* Apply the cross talk suppression for a
+ * headset without high impedance.
+ */
+ if (!nau8825->xtalk_protect) {
+ /* Raise the protection for cross talk
+ * detection if none was raised before.
+ * The driver has to cancel the process
+ * and restore its changes if the process
+ * is still ongoing when ejection happens.
+ */
+ nau8825->xtalk_protect = true;
+ nau8825_sema_acquire(nau8825, 0);
+ }
+ /* Start the cross talk detection process */
+ nau8825->xtalk_state = NAU8825_XTALK_PREPARE;
+ schedule_work(&nau8825->xtalk_work);
+ } else {
+ /* Cross talk suppression shouldn't be applied
+ * to a headset with high impedance. Thus,
+ * release the protection raised before.
+ */
+ if (nau8825->xtalk_protect) {
+ nau8825_sema_release(nau8825);
+ nau8825->xtalk_protect = false;
+ }
+ }
} else {
dev_warn(nau8825->dev, "Headset completion IRQ fired but no headset connected\n");
nau8825_eject_jack(nau8825);
event_mask |= SND_JACK_HEADSET;
clear_irq = NAU8825_HEADSET_COMPLETION_IRQ;
+ /* Record the interrupt report event so the driver can report it
+ * later. The jack report is delayed until the cross talk
+ * detection process is done.
+ */
+ if (nau8825->xtalk_state == NAU8825_XTALK_PREPARE) {
+ nau8825->xtalk_event = event;
+ nau8825->xtalk_event_mask = event_mask;
+ }
+ } else if (active_irq & NAU8825_IMPEDANCE_MEAS_IRQ) {
+ schedule_work(&nau8825->xtalk_work);
+ clear_irq = NAU8825_IMPEDANCE_MEAS_IRQ;
+ } else if ((active_irq & NAU8825_JACK_INSERTION_IRQ_MASK) ==
+ NAU8825_JACK_INSERTION_DETECTED) {
+ /* Check the GPIO status directly as one more step. This lets the
+ * driver confirm a real insertion interrupt, because the interrupt
+ * in manual mode bypasses the debounce circuit that would otherwise
+ * filter out unstable states.
+ */
+ if (nau8825_is_jack_inserted(regmap)) {
+ /* Turn off the insertion interrupt in manual mode */
+ regmap_update_bits(regmap,
+ NAU8825_REG_INTERRUPT_DIS_CTRL,
+ NAU8825_IRQ_INSERT_DIS,
+ NAU8825_IRQ_INSERT_DIS);
+ regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_MASK,
+ NAU8825_IRQ_INSERT_EN, NAU8825_IRQ_INSERT_EN);
+ /* Enable interrupts for jack type detection in auto
+ * mode, which can detect the microphone and jack type.
+ */
+ nau8825_setup_auto_irq(nau8825);
+ }
}
if (!clear_irq)
/* clears the rightmost interruption */
regmap_write(regmap, NAU8825_REG_INT_CLR_KEY_STATUS, clear_irq);
- if (event_mask)
+ /* Delay the jack report until cross talk detection is done. This
+ * prevents an application from starting playback preparation while
+ * the cross talk detection process is still working. Otherwise,
+ * resources like clock and power would be requested by both at the
+ * same time and conflicts would occur.
+ */
+ if (event_mask && nau8825->xtalk_state == NAU8825_XTALK_DONE)
snd_soc_jack_report(nau8825->jack, event, event_mask);
return IRQ_HANDLED;
NAU8825_RDAC_CLK_DELAY_MASK | NAU8825_RDAC_VREF_MASK,
(0x2 << NAU8825_RDAC_CLK_DELAY_SFT) |
(0x3 << NAU8825_RDAC_VREF_SFT));
+ /* Config L/R channel */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_DACL_CTRL,
+ NAU8825_DACL_CH_SEL_MASK, NAU8825_DACL_CH_SEL_L);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_DACR_CTRL,
+ NAU8825_DACL_CH_SEL_MASK, NAU8825_DACL_CH_SEL_R);
}
static const struct regmap_config nau8825_regmap_config = {
- .val_bits = 16,
- .reg_bits = 16,
+ .val_bits = NAU8825_REG_DATA_LEN,
+ .reg_bits = NAU8825_REG_ADDR_LEN,
.max_register = NAU8825_REG_MAX,
.readable_reg = nau8825_readable_reg,
nau8825->dapm = dapm;
- /* The interrupt clock is gated by x1[10:8],
- * one of them needs to be enabled all the time for
- * interrupts to happen.
- */
- snd_soc_dapm_force_enable_pin(dapm, "DDACR");
- snd_soc_dapm_sync(dapm);
+ return 0;
+}
- /* Unmask interruptions. Handler uses dapm object so we can enable
- * interruptions only after dapm is fully initialized.
- */
- regmap_write(nau8825->regmap, NAU8825_REG_INTERRUPT_DIS_CTRL, 0);
- nau8825_restart_jack_detection(nau8825->regmap);
+static int nau8825_codec_remove(struct snd_soc_codec *codec)
+{
+ struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+ /* Cancel and reset the cross talk suppression detection function */
+ nau8825_xtalk_cancel(nau8825);
return 0;
}
static int nau8825_calc_fll_param(unsigned int fll_in, unsigned int fs,
struct nau8825_fll *fll_param)
{
- u64 fvco;
- unsigned int fref, i;
+ u64 fvco, fvco_max;
+ unsigned int fref, i, fvco_sel;
/* Ensure the reference clock frequency (FREF) is <= 13.5MHz by dividing
* freq_in by 1, 2, 4, or 8 using FLL pre-scalar.
fll_param->ratio = fll_ratio[i].val;
/* Calculate the frequency of DCO (FDCO) given freq_out = 256 * Fs.
- * FDCO must be within the 90MHz - 100MHz or the FFL cannot be
+ * FDCO must be within the 90MHz - 124MHz range or the FLL cannot be
* guaranteed across the full range of operation.
* FDCO = freq_out * 2 * mclk_src_scaling
*/
+ fvco_max = 0;
+ fvco_sel = ARRAY_SIZE(mclk_src_scaling);
for (i = 0; i < ARRAY_SIZE(mclk_src_scaling); i++) {
fvco = 256 * fs * 2 * mclk_src_scaling[i].param;
- if (NAU_FVCO_MIN < fvco && fvco < NAU_FVCO_MAX)
- break;
+ if (fvco > NAU_FVCO_MIN && fvco < NAU_FVCO_MAX &&
+ fvco_max < fvco) {
+ fvco_max = fvco;
+ fvco_sel = i;
+ }
}
- if (i == ARRAY_SIZE(mclk_src_scaling))
+ if (ARRAY_SIZE(mclk_src_scaling) == fvco_sel)
return -EINVAL;
- fll_param->mclk_src = mclk_src_scaling[i].val;
+ fll_param->mclk_src = mclk_src_scaling[fvco_sel].val;
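+ /* Editorial note: the scan above now keeps the largest in-range FVCO
+ * instead of the first match, presumably to run the DCO at the
+ * highest workable frequency within the widened 90MHz - 124MHz window.
+ */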
/* Calculate the FLL 10-bit integer input and the FLL 16-bit fractional
* input based on FDCO, FREF and FLL ratio.
struct nau8825_fll *fll_param)
{
regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
- NAU8825_CLK_MCLK_SRC_MASK, fll_param->mclk_src);
+ NAU8825_CLK_SRC_MASK | NAU8825_CLK_MCLK_SRC_MASK,
+ NAU8825_CLK_SRC_MCLK | fll_param->mclk_src);
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL1,
NAU8825_FLL_RATIO_MASK, fll_param->ratio);
/* FLL 16-bit fractional input */
NAU8825_FLL_REF_DIV_MASK, fll_param->clk_ref_div);
/* select divided VCO input */
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
- NAU8825_FLL_FILTER_SW_MASK, 0x0000);
- /* FLL sigma delta modulator enable */
- regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL6,
- NAU8825_SDM_EN_MASK, NAU8825_SDM_EN);
+ NAU8825_FLL_CLK_SW_MASK, NAU8825_FLL_CLK_SW_REF);
+ /* Disable free-running mode */
+ regmap_update_bits(nau8825->regmap,
+ NAU8825_REG_FLL6, NAU8825_DCO_EN, 0);
+ if (fll_param->fll_frac) {
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
+ NAU8825_FLL_PDB_DAC_EN | NAU8825_FLL_LOOP_FTR_EN |
+ NAU8825_FLL_FTR_SW_MASK,
+ NAU8825_FLL_PDB_DAC_EN | NAU8825_FLL_LOOP_FTR_EN |
+ NAU8825_FLL_FTR_SW_FILTER);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL6,
+ NAU8825_SDM_EN, NAU8825_SDM_EN);
+ } else {
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
+ NAU8825_FLL_PDB_DAC_EN | NAU8825_FLL_LOOP_FTR_EN |
+ NAU8825_FLL_FTR_SW_MASK, NAU8825_FLL_FTR_SW_ACCU);
+ regmap_update_bits(nau8825->regmap,
+ NAU8825_REG_FLL6, NAU8825_SDM_EN, 0);
+ }
}
/* freq_out must be 256*Fs in order to achieve the best performance */
return 0;
}
+static int nau8825_mclk_prepare(struct nau8825 *nau8825, unsigned int freq)
+{
+ int ret = 0;
+
+ nau8825->mclk = devm_clk_get(nau8825->dev, "mclk");
+ if (IS_ERR(nau8825->mclk)) {
+ dev_info(nau8825->dev, "No 'mclk' clock found, assume MCLK is managed externally\n");
+ return 0;
+ }
+
+ if (!nau8825->mclk_freq) {
+ ret = clk_prepare_enable(nau8825->mclk);
+ if (ret) {
+ dev_err(nau8825->dev, "Unable to prepare codec mclk\n");
+ return ret;
+ }
+ }
+
+ if (nau8825->mclk_freq != freq) {
+ freq = clk_round_rate(nau8825->mclk, freq);
+ ret = clk_set_rate(nau8825->mclk, freq);
+ if (ret) {
+ dev_err(nau8825->dev, "Unable to set mclk rate\n");
+ return ret;
+ }
+ nau8825->mclk_freq = freq;
+ }
+
+ return 0;
+}
+
+static void nau8825_configure_mclk_as_sysclk(struct regmap *regmap)
+{
+ regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
+ NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_MCLK);
+ regmap_update_bits(regmap, NAU8825_REG_FLL6,
+ NAU8825_DCO_EN, 0);
+}
+
static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
unsigned int freq)
{
int ret;
switch (clk_id) {
+ case NAU8825_CLK_DIS:
+ /* Clock is provided externally; disable the internal VCO clock */
+ nau8825_configure_mclk_as_sysclk(regmap);
+ if (nau8825->mclk_freq) {
+ clk_disable_unprepare(nau8825->mclk);
+ nau8825->mclk_freq = 0;
+ }
+
+ break;
case NAU8825_CLK_MCLK:
+ /* Acquire the semaphore to synchronize the playback and the
+ * interrupt handler. To avoid playback being interfered with by
+ * the cross talk process, the driver halts the playback
+ * preparation until the cross talk process finishes.
+ */
+ nau8825_sema_acquire(nau8825, 2 * HZ);
+ nau8825_configure_mclk_as_sysclk(regmap);
+ /* MCLK passes through the clock tree undivided */
regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
- NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_MCLK);
- regmap_update_bits(regmap, NAU8825_REG_FLL6, NAU8825_DCO_EN, 0);
+ NAU8825_CLK_MCLK_SRC_MASK, 0);
+ /* Release the semaphore. */
+ nau8825_sema_release(nau8825);
- /* We selected MCLK source but the clock itself managed externally */
- if (!nau8825->mclk)
- break;
+ ret = nau8825_mclk_prepare(nau8825, freq);
+ if (ret)
+ return ret;
- if (!nau8825->mclk_freq) {
- ret = clk_prepare_enable(nau8825->mclk);
- if (ret) {
- dev_err(nau8825->dev, "Unable to prepare codec mclk\n");
- return ret;
- }
+ break;
+ case NAU8825_CLK_INTERNAL:
+ if (nau8825_is_jack_inserted(nau8825->regmap)) {
+ regmap_update_bits(regmap, NAU8825_REG_FLL6,
+ NAU8825_DCO_EN, NAU8825_DCO_EN);
+ regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
+ NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_VCO);
+ /* Decrease the VCO frequency for power saving */
+ regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
+ NAU8825_CLK_MCLK_SRC_MASK, 0xf);
+ regmap_update_bits(regmap, NAU8825_REG_FLL1,
+ NAU8825_FLL_RATIO_MASK, 0x10);
+ regmap_update_bits(regmap, NAU8825_REG_FLL6,
+ NAU8825_SDM_EN, NAU8825_SDM_EN);
+ } else {
+ /* The clock is turned off intentionally for power saving
+ * when no headset is connected.
+ */
+ nau8825_configure_mclk_as_sysclk(regmap);
+ dev_warn(nau8825->dev, "Disable clock for power saving when no headset connected\n");
+ }
+ if (nau8825->mclk_freq) {
+ clk_disable_unprepare(nau8825->mclk);
+ nau8825->mclk_freq = 0;
}
- if (nau8825->mclk_freq != freq) {
- nau8825->mclk_freq = freq;
+ break;
+ case NAU8825_CLK_FLL_MCLK:
+ /* Acquire the semaphore to synchronize the playback and the
+ * interrupt handler. To avoid playback being interfered with by
+ * the cross talk process, the driver halts the playback
+ * preparation until the cross talk process finishes.
+ */
+ nau8825_sema_acquire(nau8825, 2 * HZ);
+ regmap_update_bits(regmap, NAU8825_REG_FLL3,
+ NAU8825_FLL_CLK_SRC_MASK, NAU8825_FLL_CLK_SRC_MCLK);
+ /* Release the semaphore. */
+ nau8825_sema_release(nau8825);
- freq = clk_round_rate(nau8825->mclk, freq);
- ret = clk_set_rate(nau8825->mclk, freq);
- if (ret) {
- dev_err(nau8825->dev, "Unable to set mclk rate\n");
- return ret;
- }
+ ret = nau8825_mclk_prepare(nau8825, freq);
+ if (ret)
+ return ret;
+
+ break;
+ case NAU8825_CLK_FLL_BLK:
+ /* Acquire the semaphore to synchronize the playback and the
+ * interrupt handler. To avoid playback being interfered with by
+ * the cross talk process, the driver halts the playback
+ * preparation until the cross talk process finishes.
+ */
+ nau8825_sema_acquire(nau8825, 2 * HZ);
+ regmap_update_bits(regmap, NAU8825_REG_FLL3,
+ NAU8825_FLL_CLK_SRC_MASK, NAU8825_FLL_CLK_SRC_BLK);
+ /* Release the semaphore. */
+ nau8825_sema_release(nau8825);
+
+ if (nau8825->mclk_freq) {
+ clk_disable_unprepare(nau8825->mclk);
+ nau8825->mclk_freq = 0;
}
break;
- case NAU8825_CLK_INTERNAL:
- regmap_update_bits(regmap, NAU8825_REG_FLL6, NAU8825_DCO_EN,
- NAU8825_DCO_EN);
- regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
- NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_VCO);
+ case NAU8825_CLK_FLL_FS:
+ /* Acquire the semaphore to synchronize the playback and the
+ * interrupt handler. To avoid playback being interfered with by
+ * the cross talk process, the driver halts the playback
+ * preparation until the cross talk process finishes.
+ */
+ nau8825_sema_acquire(nau8825, 2 * HZ);
+ regmap_update_bits(regmap, NAU8825_REG_FLL3,
+ NAU8825_FLL_CLK_SRC_MASK, NAU8825_FLL_CLK_SRC_FS);
+ /* Release the semaphore. */
+ nau8825_sema_release(nau8825);
if (nau8825->mclk_freq) {
clk_disable_unprepare(nau8825->mclk);
return nau8825_configure_sysclk(nau8825, clk_id, freq);
}
+static int nau8825_resume_setup(struct nau8825 *nau8825)
+{
+ struct regmap *regmap = nau8825->regmap;
+
+ /* Turn off the clock while jack type detection is in manual mode */
+ nau8825_configure_sysclk(nau8825, NAU8825_CLK_DIS, 0);
+
+ /* Clear all interruption status */
+ nau8825_int_status_clear_all(regmap);
+
+ /* Enable both the insertion and ejection interrupts, and then
+ * bypass the de-bounce circuit.
+ */
+ regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_MASK,
+ NAU8825_IRQ_OUTPUT_EN | NAU8825_IRQ_HEADSET_COMPLETE_EN |
+ NAU8825_IRQ_EJECT_EN | NAU8825_IRQ_INSERT_EN,
+ NAU8825_IRQ_OUTPUT_EN | NAU8825_IRQ_HEADSET_COMPLETE_EN);
+ regmap_update_bits(regmap, NAU8825_REG_JACK_DET_CTRL,
+ NAU8825_JACK_DET_DB_BYPASS, NAU8825_JACK_DET_DB_BYPASS);
+ regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_DIS_CTRL,
+ NAU8825_IRQ_INSERT_DIS | NAU8825_IRQ_EJECT_DIS, 0);
+
+ return 0;
+}
+
static int nau8825_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
return ret;
}
}
+ /* Set up the codec configuration after resume */
+ nau8825_resume_setup(nau8825);
}
break;
case SND_SOC_BIAS_OFF:
+ /* Cancel and reset the cross talk detection function */
+ nau8825_xtalk_cancel(nau8825);
+ /* Turn off all interrupts before system shutdown. Keep the
+ * interrupts quiet until resume setup completes.
+ */
+ regmap_write(nau8825->regmap,
+ NAU8825_REG_INTERRUPT_DIS_CTRL, 0xffff);
+ /* Disable the ADC needed for interrupts in auto mode */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL,
+ NAU8825_ENABLE_ADC, 0);
if (nau8825->mclk_freq)
clk_disable_unprepare(nau8825->mclk);
break;
return 0;
}
-#ifdef CONFIG_PM
-static int nau8825_suspend(struct snd_soc_codec *codec)
+static int __maybe_unused nau8825_suspend(struct snd_soc_codec *codec)
{
struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
disable_irq(nau8825->irq);
+ snd_soc_codec_force_bias_level(codec, SND_SOC_BIAS_OFF);
regcache_cache_only(nau8825->regmap, true);
regcache_mark_dirty(nau8825->regmap);
return 0;
}
-static int nau8825_resume(struct snd_soc_codec *codec)
+static int __maybe_unused nau8825_resume(struct snd_soc_codec *codec)
{
struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
- /* The chip may lose power and reset in S3. regcache_sync restores
- * register values including configurations for sysclk, irq, and
- * jack/button detection.
- */
regcache_cache_only(nau8825->regmap, false);
regcache_sync(nau8825->regmap);
-
- /* Check the jack plug status directly. If the headset is unplugged
- * during S3 when the chip has no power, there will be no jack
- * detection irq even after the nau8825_restart_jack_detection below,
- * because the chip just thinks no headset has ever been plugged in.
- */
- if (!nau8825_is_jack_inserted(nau8825->regmap)) {
- nau8825_eject_jack(nau8825);
- snd_soc_jack_report(nau8825->jack, 0, SND_JACK_HEADSET);
+ if (nau8825_is_jack_inserted(nau8825->regmap)) {
+ /* If the jack is inserted, we need to check whether playback was
+ * active before suspend. If it was, the driver has to raise the
+ * protection for the cross talk function so that playback does not
+ * resume before the cross talk process finishes. Otherwise,
+ * playback would be interfered with by the cross talk function.
+ * It is better to apply hardware related parameters before
+ * starting playback or recording.
+ */
+ if (nau8825_dai_is_active(nau8825)) {
+ nau8825->xtalk_protect = true;
+ nau8825_sema_acquire(nau8825, 0);
+ }
}
-
enable_irq(nau8825->irq);
- /* Run jack detection to check the type (OMTP or CTIA) of the headset
- * if there is one. This handles the case where a different type of
- * headset is plugged in during S3. This triggers an IRQ iff a headset
- * is already plugged in.
- */
- nau8825_restart_jack_detection(nau8825->regmap);
-
return 0;
}
-#else
-#define nau8825_suspend NULL
-#define nau8825_resume NULL
-#endif
static struct snd_soc_codec_driver nau8825_codec_driver = {
.probe = nau8825_codec_probe,
+ .remove = nau8825_codec_remove,
.set_sysclk = nau8825_set_sysclk,
.set_pll = nau8825_set_pll,
.set_bias_level = nau8825_set_bias_level,
static int nau8825_setup_irq(struct nau8825 *nau8825)
{
- struct regmap *regmap = nau8825->regmap;
int ret;
- /* IRQ Output Enable */
- regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_MASK,
- NAU8825_IRQ_OUTPUT_EN, NAU8825_IRQ_OUTPUT_EN);
-
- /* Enable internal VCO needed for interruptions */
- nau8825_configure_sysclk(nau8825, NAU8825_CLK_INTERNAL, 0);
-
- /* Enable DDACR needed for interrupts
- * It is the same as force_enable_pin("DDACR") we do later
- */
- regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
- NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR);
-
ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL,
nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"nau8825", nau8825);
return PTR_ERR(nau8825->regmap);
nau8825->dev = dev;
nau8825->irq = i2c->irq;
+ /* Initialize the parameters, semaphore and work queue needed by
+ * the cross talk suppression measurement function.
+ */
+ nau8825->xtalk_state = NAU8825_XTALK_DONE;
+ nau8825->xtalk_protect = false;
+ sema_init(&nau8825->xtalk_sem, 1);
+ INIT_WORK(&nau8825->xtalk_work, nau8825_xtalk_work);
nau8825_print_device_properties(nau8825);
{ "nau8825", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, nau8825_i2c_ids);
#ifdef CONFIG_OF
static const struct of_device_id nau8825_of_ids[] = {
#define NAU8825_REG_CHARGE_PUMP_INPUT_READ 0x81
#define NAU8825_REG_GENERAL_STATUS 0x82
#define NAU8825_REG_MAX NAU8825_REG_GENERAL_STATUS
+/* 16-bit control register address and 16-bit control register data */
+#define NAU8825_REG_ADDR_LEN 16
+#define NAU8825_REG_DATA_LEN 16
/* ENA_CTRL (0x1) */
#define NAU8825_ENABLE_DACR_SFT 10
#define NAU8825_ENABLE_DACR (1 << NAU8825_ENABLE_DACR_SFT)
#define NAU8825_ENABLE_DACL_SFT 9
+#define NAU8825_ENABLE_DACL (1 << NAU8825_ENABLE_DACL_SFT)
#define NAU8825_ENABLE_ADC_SFT 8
+#define NAU8825_ENABLE_ADC (1 << NAU8825_ENABLE_ADC_SFT)
+#define NAU8825_ENABLE_ADC_CLK_SFT 7
+#define NAU8825_ENABLE_ADC_CLK (1 << NAU8825_ENABLE_ADC_CLK_SFT)
+#define NAU8825_ENABLE_DAC_CLK_SFT 6
+#define NAU8825_ENABLE_DAC_CLK (1 << NAU8825_ENABLE_DAC_CLK_SFT)
#define NAU8825_ENABLE_SAR_SFT 1
/* CLK_DIVIDER (0x3) */
/* FLL3 (0x06) */
#define NAU8825_FLL_INTEGER_MASK (0x3ff << 0)
+#define NAU8825_FLL_CLK_SRC_SFT 10
+#define NAU8825_FLL_CLK_SRC_MASK (0x3 << NAU8825_FLL_CLK_SRC_SFT)
+#define NAU8825_FLL_CLK_SRC_MCLK (0 << NAU8825_FLL_CLK_SRC_SFT)
+#define NAU8825_FLL_CLK_SRC_BLK (0x2 << NAU8825_FLL_CLK_SRC_SFT)
+#define NAU8825_FLL_CLK_SRC_FS (0x3 << NAU8825_FLL_CLK_SRC_SFT)
/* FLL4 (0x07) */
#define NAU8825_FLL_REF_DIV_MASK (0x3 << 10)
/* FLL5 (0x08) */
-#define NAU8825_FLL_FILTER_SW_MASK (0x1 << 14)
+#define NAU8825_FLL_PDB_DAC_EN (0x1 << 15)
+#define NAU8825_FLL_LOOP_FTR_EN (0x1 << 14)
+#define NAU8825_FLL_CLK_SW_MASK (0x1 << 13)
+#define NAU8825_FLL_CLK_SW_N2 (0x1 << 13)
+#define NAU8825_FLL_CLK_SW_REF (0x0 << 13)
+#define NAU8825_FLL_FTR_SW_MASK (0x1 << 12)
+#define NAU8825_FLL_FTR_SW_ACCU (0x1 << 12)
+#define NAU8825_FLL_FTR_SW_FILTER (0x0 << 12)
/* FLL6 (0x9) */
-#define NAU8825_DCO_EN_MASK (0x1 << 15)
#define NAU8825_DCO_EN (0x1 << 15)
-#define NAU8825_DCO_DIS (0x0 << 15)
-#define NAU8825_SDM_EN_MASK (0x1 << 14)
#define NAU8825_SDM_EN (0x1 << 14)
-#define NAU8825_SDM_DIS (0x0 << 14)
/* HSD_CTRL (0xc) */
#define NAU8825_HSD_AUTO_MODE (1 << 6)
/* JACK_DET_CTRL (0xd) */
#define NAU8825_JACK_DET_RESTART (1 << 9)
+#define NAU8825_JACK_DET_DB_BYPASS (1 << 8)
#define NAU8825_JACK_INSERT_DEBOUNCE_SFT 5
#define NAU8825_JACK_INSERT_DEBOUNCE_MASK (0x7 << NAU8825_JACK_INSERT_DEBOUNCE_SFT)
#define NAU8825_JACK_EJECT_DEBOUNCE_SFT 2
/* INTERRUPT_MASK (0xf) */
#define NAU8825_IRQ_OUTPUT_EN (1 << 11)
#define NAU8825_IRQ_HEADSET_COMPLETE_EN (1 << 10)
+#define NAU8825_IRQ_RMS_EN (1 << 8)
#define NAU8825_IRQ_KEY_RELEASE_EN (1 << 7)
#define NAU8825_IRQ_KEY_SHORT_PRESS_EN (1 << 5)
#define NAU8825_IRQ_EJECT_EN (1 << 2)
+#define NAU8825_IRQ_INSERT_EN (1 << 0)
/* IRQ_STATUS (0x10) */
#define NAU8825_HEADSET_COMPLETION_IRQ (1 << 10)
#define NAU8825_IRQ_KEY_RELEASE_DIS (1 << 7)
#define NAU8825_IRQ_KEY_SHORT_PRESS_DIS (1 << 5)
#define NAU8825_IRQ_EJECT_DIS (1 << 2)
+#define NAU8825_IRQ_INSERT_DIS (1 << 0)
/* SAR_CTRL (0x13) */
#define NAU8825_SAR_ADC_EN_SFT 12
/* I2S_PCM_CTRL2 (0x1d) */
#define NAU8825_I2S_TRISTATE (1 << 15) /* 0 - normal mode, 1 - Hi-Z output */
+#define NAU8825_I2S_DRV_SFT 12
+#define NAU8825_I2S_DRV_MASK (0x3 << NAU8825_I2S_DRV_SFT)
#define NAU8825_I2S_MS_SFT 3
#define NAU8825_I2S_MS_MASK (1 << NAU8825_I2S_MS_SFT)
#define NAU8825_I2S_MS_MASTER (1 << NAU8825_I2S_MS_SFT)
#define NAU8825_I2S_MS_SLAVE (0 << NAU8825_I2S_MS_SFT)
+#define NAU8825_I2S_BLK_DIV_MASK 0x7
+
+/* BIQ_CTRL (0x20) */
+#define NAU8825_BIQ_WRT_SFT 4
+#define NAU8825_BIQ_WRT_EN (1 << NAU8825_BIQ_WRT_SFT)
+#define NAU8825_BIQ_PATH_SFT 0
+#define NAU8825_BIQ_PATH_MASK (1 << NAU8825_BIQ_PATH_SFT)
+#define NAU8825_BIQ_PATH_ADC (0 << NAU8825_BIQ_PATH_SFT)
+#define NAU8825_BIQ_PATH_DAC (1 << NAU8825_BIQ_PATH_SFT)
/* ADC_RATE (0x2b) */
#define NAU8825_ADC_SYNC_DOWN_SFT 0
#define NAU8825_DAC_OVERSAMPLE_128 2
#define NAU8825_DAC_OVERSAMPLE_32 4
+/* ADC_DGAIN_CTRL (0x30) */
+#define NAU8825_ADC_DIG_VOL_MASK 0xff
+
/* MUTE_CTRL (0x31) */
#define NAU8825_DAC_ZERO_CROSSING_EN (1 << 9)
#define NAU8825_DAC_SOFT_MUTE (1 << 9)
/* HSVOL_CTRL (0x32) */
#define NAU8825_HP_MUTE (1 << 15)
+#define NAU8825_HP_MUTE_AUTO (1 << 14)
+#define NAU8825_HPL_MUTE (1 << 13)
+#define NAU8825_HPR_MUTE (1 << 12)
+#define NAU8825_HPL_VOL_SFT 6
+#define NAU8825_HPL_VOL_MASK (0x3f << NAU8825_HPL_VOL_SFT)
+#define NAU8825_HPR_VOL_SFT 0
+#define NAU8825_HPR_VOL_MASK (0x3f << NAU8825_HPR_VOL_SFT)
+#define NAU8825_HP_VOL_MIN 0x36
/* DACL_CTRL (0x33) */
#define NAU8825_DACL_CH_SEL_SFT 9
+#define NAU8825_DACL_CH_SEL_MASK (0x1 << NAU8825_DACL_CH_SEL_SFT)
+#define NAU8825_DACL_CH_SEL_L (0x0 << NAU8825_DACL_CH_SEL_SFT)
+#define NAU8825_DACL_CH_SEL_R (0x1 << NAU8825_DACL_CH_SEL_SFT)
+#define NAU8825_DACL_CH_VOL_MASK 0xff
/* DACR_CTRL (0x34) */
#define NAU8825_DACR_CH_SEL_SFT 9
+#define NAU8825_DACR_CH_SEL_MASK (0x1 << NAU8825_DACR_CH_SEL_SFT)
+#define NAU8825_DACR_CH_SEL_L (0x0 << NAU8825_DACR_CH_SEL_SFT)
+#define NAU8825_DACR_CH_SEL_R (0x1 << NAU8825_DACR_CH_SEL_SFT)
+#define NAU8825_DACR_CH_VOL_MASK 0xff
+
+/* IMM_MODE_CTRL (0x4C) */
+#define NAU8825_IMM_THD_SFT 8
+#define NAU8825_IMM_THD_MASK (0x3f << NAU8825_IMM_THD_SFT)
+#define NAU8825_IMM_GEN_VOL_SFT 6
+#define NAU8825_IMM_GEN_VOL_MASK (0x3 << NAU8825_IMM_GEN_VOL_SFT)
+#define NAU8825_IMM_GEN_VOL_1_2nd (0x0 << NAU8825_IMM_GEN_VOL_SFT)
+#define NAU8825_IMM_GEN_VOL_1_4th (0x1 << NAU8825_IMM_GEN_VOL_SFT)
+#define NAU8825_IMM_GEN_VOL_1_8th (0x2 << NAU8825_IMM_GEN_VOL_SFT)
+#define NAU8825_IMM_GEN_VOL_1_16th (0x3 << NAU8825_IMM_GEN_VOL_SFT)
+
+#define NAU8825_IMM_CYC_SFT 4
+#define NAU8825_IMM_CYC_MASK (0x3 << NAU8825_IMM_CYC_SFT)
+#define NAU8825_IMM_CYC_1024 (0x0 << NAU8825_IMM_CYC_SFT)
+#define NAU8825_IMM_CYC_2048 (0x1 << NAU8825_IMM_CYC_SFT)
+#define NAU8825_IMM_CYC_4096 (0x2 << NAU8825_IMM_CYC_SFT)
+#define NAU8825_IMM_CYC_8192 (0x3 << NAU8825_IMM_CYC_SFT)
+#define NAU8825_IMM_EN (1 << 3)
+#define NAU8825_IMM_DAC_SRC_MASK 0x7
+#define NAU8825_IMM_DAC_SRC_BIQ 0x0
+#define NAU8825_IMM_DAC_SRC_DRC 0x1
+#define NAU8825_IMM_DAC_SRC_MIX 0x2
+#define NAU8825_IMM_DAC_SRC_SIN 0x3
/* CLASSG_CTRL (0x50) */
#define NAU8825_CLASSG_TIMER_SFT 8
#define NAU8825_CLASSG_TIMER_MASK (0x3f << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_TIMER_1ms (0x1 << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_TIMER_2ms (0x2 << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_TIMER_8ms (0x4 << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_TIMER_16ms (0x8 << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_TIMER_32ms (0x10 << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_TIMER_64ms (0x20 << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_LDAC_EN (0x1 << 2)
+#define NAU8825_CLASSG_RDAC_EN (0x1 << 1)
#define NAU8825_CLASSG_EN (1 << 0)
/* I2C_DEVICE_ID (0x58) */
#define NAU8825_SOFTWARE_ID_NAU8825 0x0
/* BIAS_ADJ (0x66) */
-#define NAU8825_BIAS_TESTDAC_EN (0x3 << 8)
+#define NAU8825_BIAS_HPR_IMP (1 << 15)
+#define NAU8825_BIAS_HPL_IMP (1 << 14)
+#define NAU8825_BIAS_TESTDAC_SFT 8
+#define NAU8825_BIAS_TESTDAC_EN (0x3 << NAU8825_BIAS_TESTDAC_SFT)
+#define NAU8825_BIAS_TESTDACR_EN (0x2 << NAU8825_BIAS_TESTDAC_SFT)
+#define NAU8825_BIAS_TESTDACL_EN (0x1 << NAU8825_BIAS_TESTDAC_SFT)
#define NAU8825_BIAS_VMID (1 << 6)
#define NAU8825_BIAS_VMID_SEL_SFT 4
#define NAU8825_BIAS_VMID_SEL_MASK (3 << NAU8825_BIAS_VMID_SEL_SFT)
#define NAU8825_POWERUP_ADCL (1 << 6)
/* RDAC (0x73) */
+#define NAU8825_RDAC_FS_BCLK_ENB (1 << 15)
+#define NAU8825_RDAC_EN_SFT 12
+#define NAU8825_RDAC_EN (0x3 << NAU8825_RDAC_EN_SFT)
+#define NAU8825_RDAC_CLK_EN_SFT 8
+#define NAU8825_RDAC_CLK_EN (0x3 << NAU8825_RDAC_CLK_EN_SFT)
#define NAU8825_RDAC_CLK_DELAY_SFT 4
#define NAU8825_RDAC_CLK_DELAY_MASK (0x7 << NAU8825_RDAC_CLK_DELAY_SFT)
#define NAU8825_RDAC_VREF_SFT 2
/* System Clock Source */
enum {
- NAU8825_CLK_MCLK = 0,
+ NAU8825_CLK_DIS = 0,
+ NAU8825_CLK_MCLK,
NAU8825_CLK_INTERNAL,
+ NAU8825_CLK_FLL_MCLK,
+ NAU8825_CLK_FLL_BLK,
+ NAU8825_CLK_FLL_FS,
+};
+
+/* Cross talk detection state */
+enum {
+ NAU8825_XTALK_PREPARE = 0,
+ NAU8825_XTALK_HPR_R2L,
+ NAU8825_XTALK_HPL_R2L,
+ NAU8825_XTALK_IMM,
+ NAU8825_XTALK_DONE,
};
struct nau8825 {
struct snd_soc_dapm_context *dapm;
struct snd_soc_jack *jack;
struct clk *mclk;
+ struct work_struct xtalk_work;
+ struct semaphore xtalk_sem;
int irq;
int mclk_freq; /* 0 - mclk is disabled */
int button_pressed;
int key_debounce;
int jack_insert_debounce;
int jack_eject_debounce;
+ int high_imped;
+ int xtalk_state;
+ int xtalk_event;
+ int xtalk_event_mask;
+ bool xtalk_protect;
+ int imp_rms[NAU8825_XTALK_IMM];
};
int nau8825_enable_jack_detect(struct snd_soc_codec *codec,
return !((reg == 0x00) || (reg == 0x0f));
}
-static bool pcm1681_writeable_reg(struct device *dev, unsigned register reg)
+static bool pcm1681_writeable_reg(struct device *dev, unsigned int reg)
{
return pcm1681_accessible_reg(dev, reg) &&
(reg != PCM1681_ZERO_DETECT_STATUS);
return reg >= 0x10 && reg <= 0x17;
}
-static bool pcm179x_writeable_reg(struct device *dev, unsigned register reg)
+static bool pcm179x_writeable_reg(struct device *dev, unsigned int reg)
{
bool accessible;
.remove = pcm5102a_remove,
.driver = {
.name = "pcm5102a-codec",
- .owner = THIS_MODULE,
.of_match_table = pcm5102a_of_match,
},
};
DMI_MATCH(DMI_PRODUCT_NAME, "Skylake Client platform")
}
},
+ {
+ .ident = "Intel Kabylake RVP",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
+ }
+ },
+
{ }
};
--- /dev/null
+/*
+ * rt5514-spi.c -- RT5514 SPI driver
+ *
+ * Copyright 2015 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/spi/spi.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_qos.h>
+#include <linux/sysfs.h>
+#include <linux/clk.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "rt5514-spi.h"
+
+static struct spi_device *rt5514_spi;
+
+struct rt5514_dsp {
+ struct device *dev;
+ struct delayed_work copy_work;
+ struct mutex dma_lock;
+ struct snd_pcm_substream *substream;
+ unsigned int buf_base, buf_limit, buf_rp;
+ size_t buf_size;
+ size_t dma_offset;
+ size_t dsp_offset;
+};
+
+static const struct snd_pcm_hardware rt5514_spi_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .period_bytes_min = PAGE_SIZE,
+ .period_bytes_max = 0x20000 / 8,
+ .periods_min = 8,
+ .periods_max = 8,
+ .channels_min = 1,
+ .channels_max = 1,
+ .buffer_bytes_max = 0x20000,
+};
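+
+/* Sizing note (editorial): the 0x20000-byte buffer over 8 periods gives
+ * 0x4000-byte periods; with S16_LE mono that is 8192 frames, i.e. about
+ * 512ms per period at the 16kHz rate exposed by the DAI below.
+ */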
+
+static struct snd_soc_dai_driver rt5514_spi_dai = {
+ .name = "rt5514-dsp-cpu-dai",
+ .id = 0,
+ .capture = {
+ .stream_name = "DSP Capture",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+};
+
+static void rt5514_spi_copy_work(struct work_struct *work)
+{
+ struct rt5514_dsp *rt5514_dsp =
+ container_of(work, struct rt5514_dsp, copy_work.work);
+ struct snd_pcm_runtime *runtime;
+ size_t period_bytes, truncated_bytes = 0;
+
+ mutex_lock(&rt5514_dsp->dma_lock);
+ if (!rt5514_dsp->substream) {
+ dev_err(rt5514_dsp->dev, "No pcm substream\n");
+ goto done;
+ }
+
+ runtime = rt5514_dsp->substream->runtime;
+ period_bytes = snd_pcm_lib_period_bytes(rt5514_dsp->substream);
+
+ if (rt5514_dsp->buf_size - rt5514_dsp->dsp_offset < period_bytes)
+ period_bytes = rt5514_dsp->buf_size - rt5514_dsp->dsp_offset;
+
+ if (rt5514_dsp->buf_rp + period_bytes <= rt5514_dsp->buf_limit) {
+ rt5514_spi_burst_read(rt5514_dsp->buf_rp,
+ runtime->dma_area + rt5514_dsp->dma_offset,
+ period_bytes);
+
+ if (rt5514_dsp->buf_rp + period_bytes == rt5514_dsp->buf_limit)
+ rt5514_dsp->buf_rp = rt5514_dsp->buf_base;
+ else
+ rt5514_dsp->buf_rp += period_bytes;
+ } else {
+ truncated_bytes = rt5514_dsp->buf_limit - rt5514_dsp->buf_rp;
+ rt5514_spi_burst_read(rt5514_dsp->buf_rp,
+ runtime->dma_area + rt5514_dsp->dma_offset,
+ truncated_bytes);
+
+ rt5514_spi_burst_read(rt5514_dsp->buf_base,
+ runtime->dma_area + rt5514_dsp->dma_offset +
+ truncated_bytes, period_bytes - truncated_bytes);
+
+ rt5514_dsp->buf_rp = rt5514_dsp->buf_base +
+ period_bytes - truncated_bytes;
+ }
+
+ rt5514_dsp->dma_offset += period_bytes;
+ if (rt5514_dsp->dma_offset >= runtime->dma_bytes)
+ rt5514_dsp->dma_offset = 0;
+
+ rt5514_dsp->dsp_offset += period_bytes;
+
+ snd_pcm_period_elapsed(rt5514_dsp->substream);
+
+ if (rt5514_dsp->dsp_offset < rt5514_dsp->buf_size)
+ schedule_delayed_work(&rt5514_dsp->copy_work, 5);
+done:
+ mutex_unlock(&rt5514_dsp->dma_lock);
+}
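+
+/* Editorial sketch of the wrap handling above: with base B, limit L and
+ * read pointer RP, a period of P bytes is read in one chunk when
+ * RP + P <= L; otherwise (L - RP) bytes are read from RP and the
+ * remaining P - (L - RP) bytes restart at B, which is exactly what the
+ * two rt5514_spi_burst_read() calls implement.
+ */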
+
+/* PCM for streaming audio from the DSP buffer */
+static int rt5514_spi_pcm_open(struct snd_pcm_substream *substream)
+{
+ snd_soc_set_runtime_hwparams(substream, &rt5514_spi_pcm_hardware);
+
+ return 0;
+}
+
+static int rt5514_spi_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct rt5514_dsp *rt5514_dsp =
+ snd_soc_platform_get_drvdata(rtd->platform);
+ int ret;
+
+ mutex_lock(&rt5514_dsp->dma_lock);
+ ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
+ rt5514_dsp->substream = substream;
+ mutex_unlock(&rt5514_dsp->dma_lock);
+
+ return ret;
+}
+
+static int rt5514_spi_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct rt5514_dsp *rt5514_dsp =
+ snd_soc_platform_get_drvdata(rtd->platform);
+
+ mutex_lock(&rt5514_dsp->dma_lock);
+ rt5514_dsp->substream = NULL;
+ mutex_unlock(&rt5514_dsp->dma_lock);
+
+ cancel_delayed_work_sync(&rt5514_dsp->copy_work);
+
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int rt5514_spi_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct rt5514_dsp *rt5514_dsp =
+ snd_soc_platform_get_drvdata(rtd->platform);
+ u8 buf[8];
+
+ rt5514_dsp->dma_offset = 0;
+ rt5514_dsp->dsp_offset = 0;
+
+ /*
+ * The address area 0x1800XXXX holds register addresses, and it cannot
+ * support SPI burst reads reliably. So we issue the SPI burst reads
+ * individually to make sure the data is read correctly.
+ */
+ rt5514_spi_burst_read(RT5514_BUFFER_VOICE_BASE, (u8 *)&buf,
+ sizeof(buf));
+ rt5514_dsp->buf_base = buf[0] | buf[1] << 8 | buf[2] << 16 |
+ buf[3] << 24;
+
+ rt5514_spi_burst_read(RT5514_BUFFER_VOICE_LIMIT, (u8 *)&buf,
+ sizeof(buf));
+ rt5514_dsp->buf_limit = buf[0] | buf[1] << 8 | buf[2] << 16 |
+ buf[3] << 24;
+
+ rt5514_spi_burst_read(RT5514_BUFFER_VOICE_RP, (u8 *)&buf,
+ sizeof(buf));
+ rt5514_dsp->buf_rp = buf[0] | buf[1] << 8 | buf[2] << 16 |
+ buf[3] << 24;
+
+ rt5514_spi_burst_read(RT5514_BUFFER_VOICE_SIZE, (u8 *)&buf,
+ sizeof(buf));
+ rt5514_dsp->buf_size = buf[0] | buf[1] << 8 | buf[2] << 16 |
+ buf[3] << 24;
+
+ return 0;
+}
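+
+/* Editorial note: each of the four byte assemblies above is a
+ * little-endian 32-bit load; an equivalent sketch with the kernel helper
+ * from <asm/unaligned.h> would be:
+ *
+ * rt5514_dsp->buf_base = get_unaligned_le32(buf);
+ */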
+
+static int rt5514_spi_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct rt5514_dsp *rt5514_dsp =
+ snd_soc_platform_get_drvdata(rtd->platform);
+
+ if (cmd == SNDRV_PCM_TRIGGER_START) {
+ if (rt5514_dsp->buf_base && rt5514_dsp->buf_limit &&
+ rt5514_dsp->buf_rp && rt5514_dsp->buf_size)
+ schedule_delayed_work(&rt5514_dsp->copy_work, 0);
+ }
+
+ return 0;
+}
+
+static snd_pcm_uframes_t rt5514_spi_pcm_pointer(
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct rt5514_dsp *rt5514_dsp =
+ snd_soc_platform_get_drvdata(rtd->platform);
+
+ return bytes_to_frames(runtime, rt5514_dsp->dma_offset);
+}
+
+static struct snd_pcm_ops rt5514_spi_pcm_ops = {
+ .open = rt5514_spi_pcm_open,
+ .hw_params = rt5514_spi_hw_params,
+ .hw_free = rt5514_spi_hw_free,
+ .trigger = rt5514_spi_trigger,
+ .prepare = rt5514_spi_prepare,
+ .pointer = rt5514_spi_pcm_pointer,
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+ .page = snd_pcm_lib_get_vmalloc_page,
+};
+
+static int rt5514_spi_pcm_probe(struct snd_soc_platform *platform)
+{
+ struct rt5514_dsp *rt5514_dsp;
+
+ rt5514_dsp = devm_kzalloc(platform->dev, sizeof(*rt5514_dsp),
+ GFP_KERNEL);
+
+ rt5514_dsp->dev = &rt5514_spi->dev;
+ mutex_init(&rt5514_dsp->dma_lock);
+ INIT_DELAYED_WORK(&rt5514_dsp->copy_work, rt5514_spi_copy_work);
+ snd_soc_platform_set_drvdata(platform, rt5514_dsp);
+
+ return 0;
+}
+
+static struct snd_soc_platform_driver rt5514_spi_platform = {
+ .probe = rt5514_spi_pcm_probe,
+ .ops = &rt5514_spi_pcm_ops,
+};
+
+static const struct snd_soc_component_driver rt5514_spi_dai_component = {
+ .name = "rt5514-spi-dai",
+};
+
+/**
+ * rt5514_spi_burst_read - Read data over SPI from an rt5514 address.
+ * @addr: Start address.
+ * @rxbuf: Data buffer for reading.
+ * @len: Data length; it must be a multiple of 8.
+ *
+ * Returns true for success.
+ */
+int rt5514_spi_burst_read(unsigned int addr, u8 *rxbuf, size_t len)
+{
+ u8 spi_cmd = RT5514_SPI_CMD_BURST_READ;
+ int status;
+ u8 write_buf[8];
+ unsigned int i, end, offset = 0;
+
+ struct spi_message message;
+ struct spi_transfer x[3];
+
+ while (offset < len) {
+ if (offset + RT5514_SPI_BUF_LEN <= len)
+ end = RT5514_SPI_BUF_LEN;
+ else
+ end = len % RT5514_SPI_BUF_LEN;
+
+ write_buf[0] = spi_cmd;
+ write_buf[1] = ((addr + offset) & 0xff000000) >> 24;
+ write_buf[2] = ((addr + offset) & 0x00ff0000) >> 16;
+ write_buf[3] = ((addr + offset) & 0x0000ff00) >> 8;
+ write_buf[4] = ((addr + offset) & 0x000000ff) >> 0;
+
+ spi_message_init(&message);
+ memset(x, 0, sizeof(x));
+
+ x[0].len = 5;
+ x[0].tx_buf = write_buf;
+ spi_message_add_tail(&x[0], &message);
+
+ x[1].len = 4;
+ x[1].tx_buf = write_buf;
+ spi_message_add_tail(&x[1], &message);
+
+ x[2].len = end;
+ x[2].rx_buf = rxbuf + offset;
+ spi_message_add_tail(&x[2], &message);
+
+ status = spi_sync(rt5514_spi, &message);
+
+ if (status)
+ return false;
+
+ offset += RT5514_SPI_BUF_LEN;
+ }
+
+ for (i = 0; i < len; i += 8) {
+ write_buf[0] = rxbuf[i + 0];
+ write_buf[1] = rxbuf[i + 1];
+ write_buf[2] = rxbuf[i + 2];
+ write_buf[3] = rxbuf[i + 3];
+ write_buf[4] = rxbuf[i + 4];
+ write_buf[5] = rxbuf[i + 5];
+ write_buf[6] = rxbuf[i + 6];
+ write_buf[7] = rxbuf[i + 7];
+
+ rxbuf[i + 0] = write_buf[7];
+ rxbuf[i + 1] = write_buf[6];
+ rxbuf[i + 2] = write_buf[5];
+ rxbuf[i + 3] = write_buf[4];
+ rxbuf[i + 4] = write_buf[3];
+ rxbuf[i + 5] = write_buf[2];
+ rxbuf[i + 6] = write_buf[1];
+ rxbuf[i + 7] = write_buf[0];
+ }
+
+ return true;
+}
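+
+/* Editorial note: the per-8-byte reversal at the end of the read is a
+ * 64-bit byte swap; a sketch of the same post-processing using swab64()
+ * from <linux/swab.h> together with the unaligned access helpers:
+ *
+ * for (i = 0; i < len; i += 8)
+ * put_unaligned(swab64(get_unaligned((u64 *)(rxbuf + i))),
+ * (u64 *)(rxbuf + i));
+ */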
+
+/**
+ * rt5514_spi_burst_write - Write data over SPI to an rt5514 address.
+ * @addr: Start address.
+ * @txbuf: Data buffer for writing.
+ * @len: Data length; it must be a multiple of 8.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int rt5514_spi_burst_write(u32 addr, const u8 *txbuf, size_t len)
+{
+ u8 spi_cmd = RT5514_SPI_CMD_BURST_WRITE;
+ u8 *write_buf;
+ unsigned int i, end, offset = 0;
+
+ write_buf = kmalloc(RT5514_SPI_BUF_LEN + 6, GFP_KERNEL);
+
+ if (write_buf == NULL)
+ return -ENOMEM;
+
+ while (offset < len) {
+ if (offset + RT5514_SPI_BUF_LEN <= len)
+ end = RT5514_SPI_BUF_LEN;
+ else
+ end = len % RT5514_SPI_BUF_LEN;
+
+ write_buf[0] = spi_cmd;
+ write_buf[1] = ((addr + offset) & 0xff000000) >> 24;
+ write_buf[2] = ((addr + offset) & 0x00ff0000) >> 16;
+ write_buf[3] = ((addr + offset) & 0x0000ff00) >> 8;
+ write_buf[4] = ((addr + offset) & 0x000000ff) >> 0;
+
+ for (i = 0; i < end; i += 8) {
+ write_buf[i + 12] = txbuf[offset + i + 0];
+ write_buf[i + 11] = txbuf[offset + i + 1];
+ write_buf[i + 10] = txbuf[offset + i + 2];
+ write_buf[i + 9] = txbuf[offset + i + 3];
+ write_buf[i + 8] = txbuf[offset + i + 4];
+ write_buf[i + 7] = txbuf[offset + i + 5];
+ write_buf[i + 6] = txbuf[offset + i + 6];
+ write_buf[i + 5] = txbuf[offset + i + 7];
+ }
+
+ write_buf[end + 5] = spi_cmd;
+
+ spi_write(rt5514_spi, write_buf, end + 6);
+
+ offset += RT5514_SPI_BUF_LEN;
+ }
+
+ kfree(write_buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5514_spi_burst_write);
+
+static int rt5514_spi_probe(struct spi_device *spi)
+{
+ int ret;
+
+ rt5514_spi = spi;
+
+ ret = devm_snd_soc_register_platform(&spi->dev, &rt5514_spi_platform);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Failed to register platform.\n");
+ return ret;
+ }
+
+ ret = devm_snd_soc_register_component(&spi->dev,
+ &rt5514_spi_dai_component,
+ &rt5514_spi_dai, 1);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Failed to register component.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id rt5514_of_match[] = {
+ { .compatible = "realtek,rt5514", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rt5514_of_match);
+
+static struct spi_driver rt5514_spi_driver = {
+ .driver = {
+ .name = "rt5514",
+ .of_match_table = of_match_ptr(rt5514_of_match),
+ },
+ .probe = rt5514_spi_probe,
+};
+module_spi_driver(rt5514_spi_driver);
+
+MODULE_DESCRIPTION("RT5514 SPI driver");
+MODULE_AUTHOR("Oder Chiou <oder_chiou@realtek.com>");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/*
+ * rt5514-spi.h -- RT5514 driver
+ *
+ * Copyright 2015 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5514_SPI_H__
+#define __RT5514_SPI_H__
+
+/*
+ * RT5514_SPI_BUF_LEN is the buffer size of the SPI master controller.
+ */
+#define RT5514_SPI_BUF_LEN 240
+
+#define RT5514_BUFFER_VOICE_BASE 0x18001034
+#define RT5514_BUFFER_VOICE_LIMIT 0x18001038
+#define RT5514_BUFFER_VOICE_RP 0x1800103c
+#define RT5514_BUFFER_VOICE_SIZE 0x18001040
+
+/* SPI Command */
+enum {
+ RT5514_SPI_CMD_16_READ = 0,
+ RT5514_SPI_CMD_16_WRITE,
+ RT5514_SPI_CMD_32_READ,
+ RT5514_SPI_CMD_32_WRITE,
+ RT5514_SPI_CMD_BURST_READ,
+ RT5514_SPI_CMD_BURST_WRITE,
+};
+
+int rt5514_spi_burst_read(unsigned int addr, u8 *rxbuf, size_t len);
+int rt5514_spi_burst_write(u32 addr, const u8 *txbuf, size_t len);
+
+#endif /* __RT5514_SPI_H__ */
#include "rl6231.h"
#include "rt5514.h"
+#if defined(CONFIG_SND_SOC_RT5514_SPI)
+#include "rt5514-spi.h"
+#endif
static const struct reg_sequence rt5514_i2c_patch[] = {
{0x1800101c, 0x00000000},
{RT5514_VENDOR_ID2, 0x10ec5514},
};
+static void rt5514_enable_dsp_prepare(struct rt5514_priv *rt5514)
+{
+ /* Reset */
+ regmap_write(rt5514->i2c_regmap, 0x18002000, 0x000010ec);
+ /* LDO_I_limit */
+ regmap_write(rt5514->i2c_regmap, 0x18002200, 0x00028604);
+ /* I2C bypass enable */
+ regmap_write(rt5514->i2c_regmap, 0xfafafafa, 0x00000001);
+ /* mini-core reset */
+ regmap_write(rt5514->i2c_regmap, 0x18002f00, 0x0005514b);
+ regmap_write(rt5514->i2c_regmap, 0x18002f00, 0x00055149);
+ /* I2C bypass disable */
+ regmap_write(rt5514->i2c_regmap, 0xfafafafa, 0x00000000);
+ /* PIN config */
+ regmap_write(rt5514->i2c_regmap, 0x18002070, 0x00000040);
+ /* PLL3(QN)=RCOSC*(10+2) */
+ regmap_write(rt5514->i2c_regmap, 0x18002240, 0x0000000a);
+ /* PLL3 source=RCOSC, fsi=rt_clk */
+ regmap_write(rt5514->i2c_regmap, 0x18002100, 0x0000000b);
+ /* Power on RCOSC, pll3 */
+ regmap_write(rt5514->i2c_regmap, 0x18002004, 0x00808b81);
+ /* DSP clk source = pll3, ENABLE DSP clk */
+ regmap_write(rt5514->i2c_regmap, 0x18002f08, 0x00000005);
+ /* Enable DSP clk auto switch */
+ regmap_write(rt5514->i2c_regmap, 0x18001114, 0x00000001);
+ /* Reduce DSP power */
+ regmap_write(rt5514->i2c_regmap, 0x18001118, 0x00000001);
+}
+
static bool rt5514_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static int rt5514_dsp_voice_wake_up_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct rt5514_priv *rt5514 = snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.integer.value[0] = rt5514->dsp_enabled;
+
+ return 0;
+}
+
+static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct rt5514_priv *rt5514 = snd_soc_component_get_drvdata(component);
+ struct snd_soc_codec *codec = rt5514->codec;
+ const struct firmware *fw = NULL;
+
+ if (ucontrol->value.integer.value[0] == rt5514->dsp_enabled)
+ return 0;
+
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
+ rt5514->dsp_enabled = ucontrol->value.integer.value[0];
+
+ if (rt5514->dsp_enabled) {
+ rt5514_enable_dsp_prepare(rt5514);
+
+ request_firmware(&fw, RT5514_FIRMWARE1, codec->dev);
+ if (fw) {
+#if defined(CONFIG_SND_SOC_RT5514_SPI)
+ rt5514_spi_burst_write(0x4ff60000, fw->data,
+ ((fw->size/8)+1)*8);
+#else
+				dev_err(codec->dev, "There is no SPI driver for loading the firmware\n");
+#endif
+ release_firmware(fw);
+ fw = NULL;
+ }
+
+ request_firmware(&fw, RT5514_FIRMWARE2, codec->dev);
+ if (fw) {
+#if defined(CONFIG_SND_SOC_RT5514_SPI)
+ rt5514_spi_burst_write(0x4ffc0000, fw->data,
+ ((fw->size/8)+1)*8);
+#else
+				dev_err(codec->dev, "There is no SPI driver for loading the firmware\n");
+#endif
+ release_firmware(fw);
+ fw = NULL;
+ }
+
+ /* DSP run */
+ regmap_write(rt5514->i2c_regmap, 0x18002f00,
+ 0x00055148);
+ } else {
+ regmap_multi_reg_write(rt5514->i2c_regmap,
+ rt5514_i2c_patch, ARRAY_SIZE(rt5514_i2c_patch));
+ regcache_mark_dirty(rt5514->regmap);
+ regcache_sync(rt5514->regmap);
+ }
+ }
+
+ return 0;
+}
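
A note on the padding arithmetic above: ((fw->size / 8) + 1) * 8 rounds the
length up to a multiple of 8, as rt5514_spi_burst_write() requires, but it
always adds at least one byte (a 16-byte image becomes 24). An exact round-up
would use the kernel's ALIGN() macro; note that either form reads a few bytes
past the end of the firmware image, so this is a sketch, not a drop-in change:

    /* Hypothetical alternative: pad to the next multiple of 8 exactly. */
    size_t padded = ALIGN(fw->size, 8);

    rt5514_spi_burst_write(0x4ff60000, fw->data, padded);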
+
static const struct snd_kcontrol_new rt5514_snd_controls[] = {
SOC_DOUBLE_TLV("MIC Boost Volume", RT5514_ANA_CTRL_MICBST,
RT5514_SEL_BSTL_SFT, RT5514_SEL_BSTR_SFT, 8, 0, bst_tlv),
SOC_DOUBLE_R_TLV("ADC2 Capture Volume", RT5514_DOWNFILTER1_CTRL1,
RT5514_DOWNFILTER1_CTRL2, RT5514_AD_GAIN_SFT, 127, 0,
adc_vol_tlv),
+ SOC_SINGLE_EXT("DSP Voice Wake Up", SND_SOC_NOPM, 0, 1, 0,
+ rt5514_dsp_voice_wake_up_get, rt5514_dsp_voice_wake_up_put),
};
/* ADC Mixer*/
return 0;
}
+static int rt5514_pre_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+		/*
+		 * If the DSP is enabled when recording starts, it must be
+		 * disabled, and the normal recording settings synced back,
+		 * so that recording works properly.
+		 */
+ if (rt5514->dsp_enabled) {
+ rt5514->dsp_enabled = 0;
+ regmap_multi_reg_write(rt5514->i2c_regmap,
+ rt5514_i2c_patch, ARRAY_SIZE(rt5514_i2c_patch));
+ regcache_mark_dirty(rt5514->regmap);
+ regcache_sync(rt5514->regmap);
+ }
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget rt5514_dapm_widgets[] = {
/* Input Lines */
SND_SOC_DAPM_INPUT("DMIC1L"),
/* Audio Interface */
SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_PRE("DAPM Pre", rt5514_pre_event),
};
static const struct snd_soc_dapm_route rt5514_dapm_routes[] = {
return 0;
}
+static int rt5514_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ switch (level) {
+ case SND_SOC_BIAS_PREPARE:
+ if (IS_ERR(rt5514->mclk))
+ break;
+
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) {
+ clk_disable_unprepare(rt5514->mclk);
+ } else {
+ ret = clk_prepare_enable(rt5514->mclk);
+ if (ret)
+ return ret;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int rt5514_probe(struct snd_soc_codec *codec)
{
struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+ rt5514->mclk = devm_clk_get(codec->dev, "mclk");
+ if (PTR_ERR(rt5514->mclk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
rt5514->codec = codec;
return 0;
static struct snd_soc_codec_driver soc_codec_dev_rt5514 = {
.probe = rt5514_probe,
.idle_bias_off = true,
+ .set_bias_level = rt5514_set_bias_level,
.controls = rt5514_snd_controls,
.num_controls = ARRAY_SIZE(rt5514_snd_controls),
.dapm_widgets = rt5514_dapm_widgets,
.reg_bits = 32,
.val_bits = 32,
- .max_register = RT5514_DSP_MAPPING | RT5514_VENDOR_ID2,
.readable_reg = rt5514_i2c_readable_register,
.cache_type = REGCACHE_NONE,
return -ENODEV;
}
- ret = regmap_register_patch(rt5514->i2c_regmap, rt5514_i2c_patch,
+ ret = regmap_multi_reg_write(rt5514->i2c_regmap, rt5514_i2c_patch,
ARRAY_SIZE(rt5514_i2c_patch));
if (ret != 0)
dev_warn(&i2c->dev, "Failed to apply i2c_regmap patch: %d\n",
#ifndef __RT5514_H__
#define __RT5514_H__
+#include <linux/clk.h>
+
#define RT5514_DEVICE_ID 0x10ec5514
#define RT5514_RESET 0x2000
#define RT5514_PLL_INP_MAX 40000000
#define RT5514_PLL_INP_MIN 256000
+#define RT5514_FIRMWARE1 "rt5514_dsp_fw1.bin"
+#define RT5514_FIRMWARE2 "rt5514_dsp_fw2.bin"
+
/* System Clock Source */
enum {
RT5514_SCLK_S_MCLK,
struct rt5514_priv {
struct snd_soc_codec *codec;
struct regmap *i2c_regmap, *regmap;
+ struct clk *mclk;
int sysclk;
int sysclk_src;
int lrck;
int pll_src;
int pll_in;
int pll_out;
+ int dsp_enabled;
};
#endif /* __RT5514_H__ */
{RT5645_PR_BASE + 0x20, 0x611f},
{RT5645_PR_BASE + 0x21, 0x4040},
{RT5645_PR_BASE + 0x23, 0x0004},
+ {RT5645_ASRC_4, 0x0120},
};
static const struct reg_sequence rt5650_init_list[] = {
{ 0x83, 0x0000 },
{ 0x84, 0x0000 },
{ 0x85, 0x0000 },
- { 0x8a, 0x0000 },
+ { 0x8a, 0x0120 },
{ 0x8e, 0x0004 },
{ 0x8f, 0x1100 },
{ 0x90, 0x0646 },
{ 0x2b, 0x5454 },
{ 0x2c, 0xaaa0 },
{ 0x2d, 0x0000 },
- { 0x2f, 0x1002 },
+ { 0x2f, 0x5002 },
{ 0x31, 0x5000 },
{ 0x32, 0x0000 },
{ 0x33, 0x0000 },
{ 0x83, 0x0000 },
{ 0x84, 0x0000 },
{ 0x85, 0x0000 },
- { 0x8a, 0x0000 },
+ { 0x8a, 0x0120 },
{ 0x8e, 0x0004 },
{ 0x8f, 0x1100 },
{ 0x90, 0x0646 },
switch (reg) {
case RT5645_RESET:
+ case RT5645_PRIV_INDEX:
case RT5645_PRIV_DATA:
case RT5645_IN1_CTRL1:
case RT5645_IN1_CTRL2:
return ret;
}
+static const char * const rt5645_dac1_vol_ctrl_mode_text[] = {
+ "immediately", "zero crossing", "soft ramp"
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5645_dac1_vol_ctrl_mode, RT5645_PR_BASE,
+ RT5645_DA1_ZDET_SFT, rt5645_dac1_vol_ctrl_mode_text);
+
static const struct snd_kcontrol_new rt5645_snd_controls[] = {
/* Speaker Output Volume */
SOC_DOUBLE("Speaker Channel Switch", RT5645_SPK_VOL,
SOC_SINGLE("I2S2 Func Switch", RT5645_GPIO_CTRL1, RT5645_I2S2_SEL_SFT,
1, 1),
RT5645_HWEQ("Speaker HWEQ"),
+
+ /* Digital Soft Volume Control */
+ SOC_ENUM("DAC1 Digital Volume Control Func", rt5645_dac1_vol_ctrl_mode),
};
/**
static const struct acpi_device_id rt5645_acpi_match[] = {
{ "10EC5645", 0 },
{ "10EC5650", 0 },
+ { "10EC5640", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, rt5645_acpi_match);
DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
},
},
+ {
+ .ident = "Microsoft Surface 3",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ },
+ },
{ }
};
/* Codec Private Register definition */
+/* DAC ADC Digital Volume (0x00) */
+#define RT5645_DA1_ZDET_SFT 6
+
/* 3D Speaker Control (0x63) */
#define RT5645_3D_SPK_MASK (0x1 << 15)
#define RT5645_3D_SPK_SFT 15
{ RT5670_PR_BASE + 0x14, 0x9a8a },
{ RT5670_PR_BASE + 0x38, 0x3ba1 },
{ RT5670_PR_BASE + 0x3d, 0x3640 },
+ { 0x8a, 0x0123 },
};
static const struct reg_default rt5670_reg[] = {
{ 0x87, 0x0000 },
{ 0x88, 0x0000 },
{ 0x89, 0x0000 },
- { 0x8a, 0x0000 },
+ { 0x8a, 0x0123 },
{ 0x8b, 0x0000 },
{ 0x8c, 0x0003 },
{ 0x8d, 0x0000 },
RT5670_L_MUTE_SFT, RT5670_R_MUTE_SFT, 1, 1),
SOC_DOUBLE_TLV("HP Playback Volume", RT5670_HP_VOL,
RT5670_L_VOL_SFT, RT5670_R_VOL_SFT,
- 39, 0, out_vol_tlv),
+ 39, 1, out_vol_tlv),
/* OUTPUT Control */
SOC_DOUBLE("OUT Channel Switch", RT5670_LOUT1,
RT5670_VOL_L_SFT, RT5670_VOL_R_SFT, 1, 1),
{ "MICSUPP", NULL, "SYSCLK" },
+ { "DRC1 Signal Activity", NULL, "SYSCLK" },
{ "DRC1 Signal Activity", NULL, "DRC1L" },
{ "DRC1 Signal Activity", NULL, "DRC1R" },
};
.capture = {
.stream_name = "Audio Trace CPU",
.channels_min = 1,
- .channels_max = 6,
+ .channels_max = 4,
.rates = WM5102_RATES,
.formats = WM5102_FORMATS,
},
SND_SOC_DAPM_OUTPUT("DRC1 Signal Activity"),
SND_SOC_DAPM_OUTPUT("DRC2 Signal Activity"),
+SND_SOC_DAPM_OUTPUT("DSP Voice Trigger"),
+
+SND_SOC_DAPM_SWITCH("DSP3 Voice Trigger", SND_SOC_NOPM, 2, 0,
+ &arizona_voice_trigger_switch[2]),
+
SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT,
0, NULL, 0, wm5110_in_ev,
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
{ "OUT2L", NULL, "SYSCLK" },
{ "OUT2R", NULL, "SYSCLK" },
{ "OUT3L", NULL, "SYSCLK" },
+ { "OUT3R", NULL, "SYSCLK" },
{ "OUT4L", NULL, "SYSCLK" },
{ "OUT4R", NULL, "SYSCLK" },
{ "OUT5L", NULL, "SYSCLK" },
{ "MICSUPP", NULL, "SYSCLK" },
+ { "DRC1 Signal Activity", NULL, "SYSCLK" },
+ { "DRC2 Signal Activity", NULL, "SYSCLK" },
{ "DRC1 Signal Activity", NULL, "DRC1L" },
{ "DRC1 Signal Activity", NULL, "DRC1R" },
{ "DRC2 Signal Activity", NULL, "DRC2L" },
{ "DRC2 Signal Activity", NULL, "DRC2R" },
+
+ { "DSP Voice Trigger", NULL, "SYSCLK" },
+ { "DSP Voice Trigger", NULL, "DSP3 Voice Trigger" },
+ { "DSP3 Voice Trigger", "Switch", "DSP3" },
};
static int wm5110_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
{
struct wm5110_priv *priv = data;
struct arizona *arizona = priv->core.arizona;
+ struct arizona_voice_trigger_info info;
int serviced = 0;
int i, ret;
ret = wm_adsp_compr_handle_irq(&priv->core.adsp[i]);
if (ret != -ENODEV)
serviced++;
+ if (ret == WM_ADSP_COMPR_VOICE_TRIGGER) {
+ info.core = i;
+ arizona_call_notifiers(arizona,
+ ARIZONA_NOTIFY_VOICE_TRIGGER,
+ &info);
+ }
}
if (!serviced) {
arizona_init_spk(codec);
arizona_init_gpio(codec);
arizona_init_mono(codec);
+ arizona_init_notifiers(codec);
ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
"ADSP2 Compressed IRQ", wm5110_adsp2_irq,
.max_register = WM8940_MONOMIX,
.reg_defaults = wm8940_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8940_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
.readable_reg = wm8940_readable_register,
.volatile_reg = wm8940_volatile_register,
{ "MICSUPP", NULL, "SYSCLK" },
+ { "DRC1 Signal Activity", NULL, "SYSCLK" },
{ "DRC1 Signal Activity", NULL, "DRC1L" },
{ "DRC1 Signal Activity", NULL, "DRC1R" },
};
int compr_direction;
int num_caps;
const struct wm_adsp_fw_caps *caps;
+ bool voice_trigger;
} wm_adsp_fw[WM_ADSP_NUM_FW] = {
[WM_ADSP_FW_MBC_VSS] = { .file = "mbc-vss" },
[WM_ADSP_FW_HIFI] = { .file = "hifi" },
.compr_direction = SND_COMPRESS_CAPTURE,
.num_caps = ARRAY_SIZE(ctrl_caps),
.caps = ctrl_caps,
+ .voice_trigger = true,
},
[WM_ADSP_FW_ASR] = { .file = "asr" },
[WM_ADSP_FW_TRACE] = {
dsp->running = false;
regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
- ADSP2_SYS_ENA | ADSP2_CORE_ENA |
- ADSP2_START, 0);
+ ADSP2_CORE_ENA | ADSP2_START, 0);
/* Make sure DMAs are quiesced */
+ regmap_write(dsp->regmap, dsp->base + ADSP2_RDMA_CONFIG_1, 0);
regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_1, 0);
regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_2, 0);
- regmap_write(dsp->regmap, dsp->base + ADSP2_RDMA_CONFIG_1, 0);
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, 0);
list_for_each_entry(ctl, &dsp->ctl_list, list)
ctl->enabled = 0;
goto out;
}
+ if (wm_adsp_fw[dsp->fw].voice_trigger && buf->irq_count == 2)
+ ret = WM_ADSP_COMPR_VOICE_TRIGGER;
+
out_notify:
if (compr && compr->stream)
snd_compr_fragment_elapsed(compr->stream);
buf = compr->buf;
- if (!compr->buf) {
- ret = -ENXIO;
- goto out;
- }
-
- if (compr->buf->error) {
+ if (!compr->buf || compr->buf->error) {
+ snd_compr_stop_error(stream, SNDRV_PCM_STATE_XRUN);
ret = -EIO;
goto out;
}
*/
if (buf->avail < wm_adsp_compr_frag_words(compr)) {
ret = wm_adsp_buffer_get_error(buf);
- if (ret < 0)
+ if (ret < 0) {
+ if (compr->buf->error)
+ snd_compr_stop_error(stream,
+ SNDRV_PCM_STATE_XRUN);
goto out;
+ }
ret = wm_adsp_buffer_reenable_irq(buf);
if (ret < 0) {
adsp_dbg(dsp, "Requested read of %zu bytes\n", count);
- if (!compr->buf)
- return -ENXIO;
-
- if (compr->buf->error)
+ if (!compr->buf || compr->buf->error) {
+ snd_compr_stop_error(compr->stream, SNDRV_PCM_STATE_XRUN);
return -EIO;
+ }
count /= WM_ADSP_DATA_WORD_SIZE;
#include "wmfw.h"
+/* Return values for wm_adsp_compr_handle_irq */
+#define WM_ADSP_COMPR_OK 0
+#define WM_ADSP_COMPR_VOICE_TRIGGER 1
+
struct wm_adsp_region {
int type;
unsigned int base;
};
static struct davinci_mcasp_pdata dra7_mcasp_pdata = {
- .tx_dma_offset = 0x200,
- .rx_dma_offset = 0x284,
+ /* The CFG port offset will be calculated if it is needed */
+ .tx_dma_offset = 0,
+ .rx_dma_offset = 0,
.version = MCASP_VERSION_4,
};
pdata = pdev->dev.platform_data;
return pdata;
} else if (match) {
- pdata = (struct davinci_mcasp_pdata*) match->data;
+ pdata = devm_kmemdup(&pdev->dev, match->data, sizeof(*pdata),
+ GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev,
+ "Failed to allocate memory for pdata\n");
+ ret = -ENOMEM;
+ return pdata;
+ }
} else {
/* control shouldn't reach here. something is wrong */
ret = -EINVAL;
return PCM_EDMA;
}
+static u32 davinci_mcasp_txdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+ int i;
+ u32 offset = 0;
+
+ if (pdata->version != MCASP_VERSION_4)
+ return pdata->tx_dma_offset;
+
+ for (i = 0; i < pdata->num_serializer; i++) {
+ if (pdata->serial_dir[i] == TX_MODE) {
+ if (!offset) {
+ offset = DAVINCI_MCASP_TXBUF_REG(i);
+ } else {
+ pr_err("%s: Only one serializer allowed!\n",
+ __func__);
+ break;
+ }
+ }
+ }
+
+ return offset;
+}
+
+static u32 davinci_mcasp_rxdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+ int i;
+ u32 offset = 0;
+
+ if (pdata->version != MCASP_VERSION_4)
+ return pdata->rx_dma_offset;
+
+ for (i = 0; i < pdata->num_serializer; i++) {
+ if (pdata->serial_dir[i] == RX_MODE) {
+ if (!offset) {
+ offset = DAVINCI_MCASP_RXBUF_REG(i);
+ } else {
+ pr_err("%s: Only one serializer allowed!\n",
+ __func__);
+ break;
+ }
+ }
+ }
+
+ return offset;
+}
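
Worked example: on a MCASP_VERSION_4 device whose only TX serializer is index
0 and only RX serializer is index 1, the helpers above return
DAVINCI_MCASP_TXBUF_REG(0) = 0x200 and DAVINCI_MCASP_RXBUF_REG(1) =
0x280 + (1 << 2) = 0x284, exactly the offsets that dra7_mcasp_pdata used to
hard-code.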
+
static int davinci_mcasp_probe(struct platform_device *pdev)
{
struct snd_dmaengine_dai_dma_data *dma_data;
if (dat)
dma_data->addr = dat->start;
else
- dma_data->addr = mem->start + pdata->tx_dma_offset;
+ dma_data->addr = mem->start + davinci_mcasp_txdma_offset(pdata);
dma = &mcasp->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (dat)
dma_data->addr = dat->start;
else
- dma_data->addr = mem->start + pdata->rx_dma_offset;
+ dma_data->addr =
+ mem->start + davinci_mcasp_rxdma_offset(pdata);
dma = &mcasp->dma_request[SNDRV_PCM_STREAM_CAPTURE];
res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
(n << 2))
/* Transmit Buffer for Serializer n */
-#define DAVINCI_MCASP_TXBUF_REG 0x200
+#define DAVINCI_MCASP_TXBUF_REG(n)	(0x200 + ((n) << 2))
/* Receive Buffer for Serializer n */
-#define DAVINCI_MCASP_RXBUF_REG 0x280
+#define DAVINCI_MCASP_RXBUF_REG(n)	(0x280 + ((n) << 2))
/* McASP FIFO Registers */
#define DAVINCI_MCASP_V2_AFIFO_BASE (0x1010)
Synopsys DesignWare I2S device. The device supports up to a
maximum of 8 channels each for playback and record.
+config SND_DESIGNWARE_PCM
+ tristate "PCM PIO extension for I2S driver"
+ depends on SND_DESIGNWARE_I2S
+ help
+ Say Y, M or N if you want to add a custom ALSA extension that registers
+ a PCM and uses PIO to transfer data.
+
+	  This functionality is especially suited for I2S devices that don't
+	  have DMA support.
# SYNOPSYS Platform Support
obj-$(CONFIG_SND_DESIGNWARE_I2S) += designware_i2s.o
-
+ifdef CONFIG_SND_DESIGNWARE_PCM
+obj-$(CONFIG_SND_DESIGNWARE_I2S) += designware_pcm.o
+endif
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
-
-/* common register for all channel */
-#define IER 0x000
-#define IRER 0x004
-#define ITER 0x008
-#define CER 0x00C
-#define CCR 0x010
-#define RXFFR 0x014
-#define TXFFR 0x018
-
-/* I2STxRxRegisters for all channels */
-#define LRBR_LTHR(x) (0x40 * x + 0x020)
-#define RRBR_RTHR(x) (0x40 * x + 0x024)
-#define RER(x) (0x40 * x + 0x028)
-#define TER(x) (0x40 * x + 0x02C)
-#define RCR(x) (0x40 * x + 0x030)
-#define TCR(x) (0x40 * x + 0x034)
-#define ISR(x) (0x40 * x + 0x038)
-#define IMR(x) (0x40 * x + 0x03C)
-#define ROR(x) (0x40 * x + 0x040)
-#define TOR(x) (0x40 * x + 0x044)
-#define RFCR(x) (0x40 * x + 0x048)
-#define TFCR(x) (0x40 * x + 0x04C)
-#define RFF(x) (0x40 * x + 0x050)
-#define TFF(x) (0x40 * x + 0x054)
-
-/* I2SCOMPRegisters */
-#define I2S_COMP_PARAM_2 0x01F0
-#define I2S_COMP_PARAM_1 0x01F4
-#define I2S_COMP_VERSION 0x01F8
-#define I2S_COMP_TYPE 0x01FC
-
-/*
- * Component parameter register fields - define the I2S block's
- * configuration.
- */
-#define COMP1_TX_WORDSIZE_3(r) (((r) & GENMASK(27, 25)) >> 25)
-#define COMP1_TX_WORDSIZE_2(r) (((r) & GENMASK(24, 22)) >> 22)
-#define COMP1_TX_WORDSIZE_1(r) (((r) & GENMASK(21, 19)) >> 19)
-#define COMP1_TX_WORDSIZE_0(r) (((r) & GENMASK(18, 16)) >> 16)
-#define COMP1_TX_CHANNELS(r) (((r) & GENMASK(10, 9)) >> 9)
-#define COMP1_RX_CHANNELS(r) (((r) & GENMASK(8, 7)) >> 7)
-#define COMP1_RX_ENABLED(r) (((r) & BIT(6)) >> 6)
-#define COMP1_TX_ENABLED(r) (((r) & BIT(5)) >> 5)
-#define COMP1_MODE_EN(r) (((r) & BIT(4)) >> 4)
-#define COMP1_FIFO_DEPTH_GLOBAL(r) (((r) & GENMASK(3, 2)) >> 2)
-#define COMP1_APB_DATA_WIDTH(r) (((r) & GENMASK(1, 0)) >> 0)
-
-#define COMP2_RX_WORDSIZE_3(r) (((r) & GENMASK(12, 10)) >> 10)
-#define COMP2_RX_WORDSIZE_2(r) (((r) & GENMASK(9, 7)) >> 7)
-#define COMP2_RX_WORDSIZE_1(r) (((r) & GENMASK(5, 3)) >> 3)
-#define COMP2_RX_WORDSIZE_0(r) (((r) & GENMASK(2, 0)) >> 0)
-
-/* Number of entries in WORDSIZE and DATA_WIDTH parameter registers */
-#define COMP_MAX_WORDSIZE (1 << 3)
-#define COMP_MAX_DATA_WIDTH (1 << 2)
-
-#define MAX_CHANNEL_NUM 8
-#define MIN_CHANNEL_NUM 2
-
-union dw_i2s_snd_dma_data {
- struct i2s_dma_data pd;
- struct snd_dmaengine_dai_dma_data dt;
-};
-
-struct dw_i2s_dev {
- void __iomem *i2s_base;
- struct clk *clk;
- int active;
- unsigned int capability;
- unsigned int quirks;
- unsigned int i2s_reg_comp1;
- unsigned int i2s_reg_comp2;
- struct device *dev;
- u32 ccr;
- u32 xfer_resolution;
- u32 fifo_th;
-
- /* data related to DMA transfers b/w i2s and DMAC */
- union dw_i2s_snd_dma_data play_dma_data;
- union dw_i2s_snd_dma_data capture_dma_data;
- struct i2s_clk_config_data config;
- int (*i2s_clk_cfg)(struct i2s_clk_config_data *config);
-};
+#include "local.h"
static inline void i2s_write_reg(void __iomem *io_base, int reg, u32 val)
{
}
}
-static void i2s_start(struct dw_i2s_dev *dev,
- struct snd_pcm_substream *substream)
+static inline void i2s_disable_irqs(struct dw_i2s_dev *dev, u32 stream,
+ int chan_nr)
+{
+ u32 i, irq;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ for (i = 0; i < (chan_nr / 2); i++) {
+ irq = i2s_read_reg(dev->i2s_base, IMR(i));
+ i2s_write_reg(dev->i2s_base, IMR(i), irq | 0x30);
+ }
+ } else {
+ for (i = 0; i < (chan_nr / 2); i++) {
+ irq = i2s_read_reg(dev->i2s_base, IMR(i));
+ i2s_write_reg(dev->i2s_base, IMR(i), irq | 0x03);
+ }
+ }
+}
+
+static inline void i2s_enable_irqs(struct dw_i2s_dev *dev, u32 stream,
+ int chan_nr)
{
- struct i2s_clk_config_data *config = &dev->config;
u32 i, irq;
- i2s_write_reg(dev->i2s_base, IER, 1);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- for (i = 0; i < (config->chan_nr / 2); i++) {
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ for (i = 0; i < (chan_nr / 2); i++) {
irq = i2s_read_reg(dev->i2s_base, IMR(i));
i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x30);
}
- i2s_write_reg(dev->i2s_base, ITER, 1);
} else {
- for (i = 0; i < (config->chan_nr / 2); i++) {
+ for (i = 0; i < (chan_nr / 2); i++) {
irq = i2s_read_reg(dev->i2s_base, IMR(i));
i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x03);
}
- i2s_write_reg(dev->i2s_base, IRER, 1);
+ }
+}
+
+static irqreturn_t i2s_irq_handler(int irq, void *dev_id)
+{
+ struct dw_i2s_dev *dev = dev_id;
+ bool irq_valid = false;
+ u32 isr[4];
+ int i;
+
+ for (i = 0; i < 4; i++)
+ isr[i] = i2s_read_reg(dev->i2s_base, ISR(i));
+
+ i2s_clear_irqs(dev, SNDRV_PCM_STREAM_PLAYBACK);
+ i2s_clear_irqs(dev, SNDRV_PCM_STREAM_CAPTURE);
+
+ for (i = 0; i < 4; i++) {
+ /*
+		 * Check if the TX FIFO is empty. If empty, fill the FIFO
+		 * with samples. NOTE: only two channels are supported.
+ */
+ if ((isr[i] & ISR_TXFE) && (i == 0) && dev->use_pio) {
+ dw_pcm_push_tx(dev);
+ irq_valid = true;
+ }
+
+		/* Data available; record mode is not supported in PIO mode */
+ if (isr[i] & ISR_RXDA)
+ irq_valid = true;
+
+ /* Error Handling: TX */
+ if (isr[i] & ISR_TXFO) {
+ dev_err(dev->dev, "TX overrun (ch_id=%d)\n", i);
+ irq_valid = true;
+ }
+
+		/* Error Handling: RX */
+ if (isr[i] & ISR_RXFO) {
+ dev_err(dev->dev, "RX overrun (ch_id=%d)\n", i);
+ irq_valid = true;
+ }
}
+ if (irq_valid)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+static void i2s_start(struct dw_i2s_dev *dev,
+ struct snd_pcm_substream *substream)
+{
+ struct i2s_clk_config_data *config = &dev->config;
+
+ i2s_write_reg(dev->i2s_base, IER, 1);
+ i2s_enable_irqs(dev, substream->stream, config->chan_nr);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ i2s_write_reg(dev->i2s_base, ITER, 1);
+ else
+ i2s_write_reg(dev->i2s_base, IRER, 1);
+
i2s_write_reg(dev->i2s_base, CER, 1);
}
static void i2s_stop(struct dw_i2s_dev *dev,
struct snd_pcm_substream *substream)
{
- u32 i = 0, irq;
i2s_clear_irqs(dev, substream->stream);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
i2s_write_reg(dev->i2s_base, ITER, 0);
-
- for (i = 0; i < 4; i++) {
- irq = i2s_read_reg(dev->i2s_base, IMR(i));
- i2s_write_reg(dev->i2s_base, IMR(i), irq | 0x30);
- }
- } else {
+ else
i2s_write_reg(dev->i2s_base, IRER, 0);
- for (i = 0; i < 4; i++) {
- irq = i2s_read_reg(dev->i2s_base, IMR(i));
- i2s_write_reg(dev->i2s_base, IMR(i), irq | 0x03);
- }
- }
+ i2s_disable_irqs(dev, substream->stream, 8);
if (!dev->active) {
i2s_write_reg(dev->i2s_base, CER, 0);
static void dw_i2s_config(struct dw_i2s_dev *dev, int stream)
{
- u32 ch_reg, irq;
+ u32 ch_reg;
struct i2s_clk_config_data *config = &dev->config;
dev->xfer_resolution);
i2s_write_reg(dev->i2s_base, TFCR(ch_reg),
dev->fifo_th - 1);
- irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
- i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
} else {
i2s_write_reg(dev->i2s_base, RCR(ch_reg),
dev->xfer_resolution);
i2s_write_reg(dev->i2s_base, RFCR(ch_reg),
dev->fifo_th - 1);
- irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
- i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
}
break;
default:
- dev_err(dev->dev, "designware-i2s: unsuppted PCM fmt");
+ dev_err(dev->dev, "designware-i2s: unsupported PCM fmt");
return -EINVAL;
}
const struct i2s_platform_data *pdata = pdev->dev.platform_data;
struct dw_i2s_dev *dev;
struct resource *res;
- int ret;
+ int ret, irq;
struct snd_soc_dai_driver *dw_i2s_dai;
const char *clk_id;
dev->dev = &pdev->dev;
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0) {
+ ret = devm_request_irq(&pdev->dev, irq, i2s_irq_handler, 0,
+ pdev->name, dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return ret;
+ }
+ }
+
dev->i2s_reg_comp1 = I2S_COMP_PARAM_1;
dev->i2s_reg_comp2 = I2S_COMP_PARAM_2;
if (pdata) {
if (!pdata) {
ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
- if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ dev_err(&pdev->dev,
+ "failed to register PCM, deferring probe\n");
+ return ret;
+ } else if (ret) {
dev_err(&pdev->dev,
- "Could not register PCM: %d\n", ret);
- goto err_clk_disable;
+ "Could not register DMA PCM: %d\n"
+ "falling back to PIO mode\n", ret);
+ ret = dw_pcm_register(pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Could not register PIO PCM: %d\n",
+ ret);
+ goto err_clk_disable;
+ }
}
}
+
pm_runtime_enable(&pdev->dev);
return 0;
--- /dev/null
+/*
+ * ALSA SoC Synopsys PIO PCM for I2S driver
+ *
+ * sound/soc/dwc/designware_pcm.c
+ *
+ * Copyright (C) 2016 Synopsys
+ * Jose Abreu <joabreu@synopsys.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/rcupdate.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include "local.h"
+
+#define BUFFER_BYTES_MAX (3 * 2 * 8 * PERIOD_BYTES_MIN)
+#define PERIOD_BYTES_MIN 4096
+#define PERIODS_MIN 2
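
For reference, BUFFER_BYTES_MAX works out to 3 * 2 * 8 * 4096 = 196608 bytes,
so the hardware description below advertises period_bytes_max = 196608 / 2 =
98304 and periods_max = 196608 / 4096 = 48.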
+
+#define dw_pcm_tx_fn(sample_bits) \
+static unsigned int dw_pcm_tx_##sample_bits(struct dw_i2s_dev *dev, \
+ struct snd_pcm_runtime *runtime, unsigned int tx_ptr, \
+ bool *period_elapsed) \
+{ \
+ const u##sample_bits (*p)[2] = (void *)runtime->dma_area; \
+ unsigned int period_pos = tx_ptr % runtime->period_size; \
+ int i; \
+\
+ for (i = 0; i < dev->fifo_th; i++) { \
+ iowrite32(p[tx_ptr][0], dev->i2s_base + LRBR_LTHR(0)); \
+ iowrite32(p[tx_ptr][1], dev->i2s_base + RRBR_RTHR(0)); \
+ period_pos++; \
+ if (++tx_ptr >= runtime->buffer_size) \
+ tx_ptr = 0; \
+ } \
+ *period_elapsed = period_pos >= runtime->period_size; \
+ return tx_ptr; \
+}
+
+dw_pcm_tx_fn(16);
+dw_pcm_tx_fn(32);
+
+#undef dw_pcm_tx_fn
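
For readability, dw_pcm_tx_fn(16) expands to the function below (reconstructed
here with the line continuations removed; the 32-bit variant is identical with
u32 samples):

    static unsigned int dw_pcm_tx_16(struct dw_i2s_dev *dev,
            struct snd_pcm_runtime *runtime, unsigned int tx_ptr,
            bool *period_elapsed)
    {
        const u16 (*p)[2] = (void *)runtime->dma_area;
        unsigned int period_pos = tx_ptr % runtime->period_size;
        int i;

        /* Push one FIFO-threshold worth of left/right sample pairs. */
        for (i = 0; i < dev->fifo_th; i++) {
            iowrite32(p[tx_ptr][0], dev->i2s_base + LRBR_LTHR(0));
            iowrite32(p[tx_ptr][1], dev->i2s_base + RRBR_RTHR(0));
            period_pos++;
            if (++tx_ptr >= runtime->buffer_size)
                tx_ptr = 0;
        }
        *period_elapsed = period_pos >= runtime->period_size;
        return tx_ptr;
    }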
+
+static const struct snd_pcm_hardware dw_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER,
+ .rates = SNDRV_PCM_RATE_32000 |
+ SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .rate_min = 32000,
+ .rate_max = 48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = BUFFER_BYTES_MAX,
+ .period_bytes_min = PERIOD_BYTES_MIN,
+ .period_bytes_max = BUFFER_BYTES_MAX / PERIODS_MIN,
+ .periods_min = PERIODS_MIN,
+ .periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN,
+ .fifo_size = 16,
+};
+
+void dw_pcm_push_tx(struct dw_i2s_dev *dev)
+{
+ struct snd_pcm_substream *tx_substream;
+ bool tx_active, period_elapsed;
+
+ rcu_read_lock();
+ tx_substream = rcu_dereference(dev->tx_substream);
+ tx_active = tx_substream && snd_pcm_running(tx_substream);
+ if (tx_active) {
+ unsigned int tx_ptr = READ_ONCE(dev->tx_ptr);
+ unsigned int new_tx_ptr = dev->tx_fn(dev, tx_substream->runtime,
+ tx_ptr, &period_elapsed);
+ cmpxchg(&dev->tx_ptr, tx_ptr, new_tx_ptr);
+
+ if (period_elapsed)
+ snd_pcm_period_elapsed(tx_substream);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(dw_pcm_push_tx);
+
+static int dw_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+ snd_soc_set_runtime_hwparams(substream, &dw_pcm_hardware);
+ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ runtime->private_data = dev;
+
+ return 0;
+}
+
+static int dw_pcm_close(struct snd_pcm_substream *substream)
+{
+ synchronize_rcu();
+ return 0;
+}
+
+static int dw_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct dw_i2s_dev *dev = runtime->private_data;
+ int ret;
+
+ switch (params_channels(hw_params)) {
+ case 2:
+ break;
+ default:
+ dev_err(dev->dev, "invalid channels number\n");
+ return -EINVAL;
+ }
+
+ switch (params_format(hw_params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ dev->tx_fn = dw_pcm_tx_16;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ dev->tx_fn = dw_pcm_tx_32;
+ break;
+ default:
+ dev_err(dev->dev, "invalid format\n");
+ return -EINVAL;
+ }
+
+ if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) {
+ dev_err(dev->dev, "only playback is available\n");
+ return -EINVAL;
+ }
+
+ ret = snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+ if (ret < 0)
+ return ret;
+ else
+ return 0;
+}
+
+static int dw_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+
+static int dw_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct dw_i2s_dev *dev = runtime->private_data;
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ WRITE_ONCE(dev->tx_ptr, 0);
+ rcu_assign_pointer(dev->tx_substream, substream);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ rcu_assign_pointer(dev->tx_substream, NULL);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static snd_pcm_uframes_t dw_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct dw_i2s_dev *dev = runtime->private_data;
+ snd_pcm_uframes_t pos = READ_ONCE(dev->tx_ptr);
+
+ return pos < runtime->buffer_size ? pos : 0;
+}
+
+static int dw_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ size_t size = dw_pcm_hardware.buffer_bytes_max;
+
+ return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL), size, size);
+}
+
+static void dw_pcm_free(struct snd_pcm *pcm)
+{
+ snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+static const struct snd_pcm_ops dw_pcm_ops = {
+ .open = dw_pcm_open,
+ .close = dw_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = dw_pcm_hw_params,
+ .hw_free = dw_pcm_hw_free,
+ .trigger = dw_pcm_trigger,
+ .pointer = dw_pcm_pointer,
+};
+
+static const struct snd_soc_platform_driver dw_pcm_platform = {
+ .pcm_new = dw_pcm_new,
+ .pcm_free = dw_pcm_free,
+ .ops = &dw_pcm_ops,
+};
+
+int dw_pcm_register(struct platform_device *pdev)
+{
+ return devm_snd_soc_register_platform(&pdev->dev, &dw_pcm_platform);
+}
+EXPORT_SYMBOL_GPL(dw_pcm_register);
--- /dev/null
+/*
+ * Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com)
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DESIGNWARE_LOCAL_H
+#define __DESIGNWARE_LOCAL_H
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm.h>
+#include <sound/designware_i2s.h>
+
+/* common register for all channel */
+#define IER 0x000
+#define IRER 0x004
+#define ITER 0x008
+#define CER 0x00C
+#define CCR 0x010
+#define RXFFR 0x014
+#define TXFFR 0x018
+
+/* Interrupt status register fields */
+#define ISR_TXFO BIT(5)
+#define ISR_TXFE BIT(4)
+#define ISR_RXFO BIT(1)
+#define ISR_RXDA BIT(0)
+
+/* I2STxRxRegisters for all channels */
+#define LRBR_LTHR(x) (0x40 * x + 0x020)
+#define RRBR_RTHR(x) (0x40 * x + 0x024)
+#define RER(x) (0x40 * x + 0x028)
+#define TER(x) (0x40 * x + 0x02C)
+#define RCR(x) (0x40 * x + 0x030)
+#define TCR(x) (0x40 * x + 0x034)
+#define ISR(x) (0x40 * x + 0x038)
+#define IMR(x) (0x40 * x + 0x03C)
+#define ROR(x) (0x40 * x + 0x040)
+#define TOR(x) (0x40 * x + 0x044)
+#define RFCR(x) (0x40 * x + 0x048)
+#define TFCR(x) (0x40 * x + 0x04C)
+#define RFF(x) (0x40 * x + 0x050)
+#define TFF(x) (0x40 * x + 0x054)
+
+/* I2SCOMPRegisters */
+#define I2S_COMP_PARAM_2 0x01F0
+#define I2S_COMP_PARAM_1 0x01F4
+#define I2S_COMP_VERSION 0x01F8
+#define I2S_COMP_TYPE 0x01FC
+
+/*
+ * Component parameter register fields - define the I2S block's
+ * configuration.
+ */
+#define COMP1_TX_WORDSIZE_3(r) (((r) & GENMASK(27, 25)) >> 25)
+#define COMP1_TX_WORDSIZE_2(r) (((r) & GENMASK(24, 22)) >> 22)
+#define COMP1_TX_WORDSIZE_1(r) (((r) & GENMASK(21, 19)) >> 19)
+#define COMP1_TX_WORDSIZE_0(r) (((r) & GENMASK(18, 16)) >> 16)
+#define COMP1_TX_CHANNELS(r) (((r) & GENMASK(10, 9)) >> 9)
+#define COMP1_RX_CHANNELS(r) (((r) & GENMASK(8, 7)) >> 7)
+#define COMP1_RX_ENABLED(r) (((r) & BIT(6)) >> 6)
+#define COMP1_TX_ENABLED(r) (((r) & BIT(5)) >> 5)
+#define COMP1_MODE_EN(r) (((r) & BIT(4)) >> 4)
+#define COMP1_FIFO_DEPTH_GLOBAL(r) (((r) & GENMASK(3, 2)) >> 2)
+#define COMP1_APB_DATA_WIDTH(r) (((r) & GENMASK(1, 0)) >> 0)
+
+#define COMP2_RX_WORDSIZE_3(r) (((r) & GENMASK(12, 10)) >> 10)
+#define COMP2_RX_WORDSIZE_2(r) (((r) & GENMASK(9, 7)) >> 7)
+#define COMP2_RX_WORDSIZE_1(r) (((r) & GENMASK(5, 3)) >> 3)
+#define COMP2_RX_WORDSIZE_0(r) (((r) & GENMASK(2, 0)) >> 0)
+
+/* Number of entries in WORDSIZE and DATA_WIDTH parameter registers */
+#define COMP_MAX_WORDSIZE (1 << 3)
+#define COMP_MAX_DATA_WIDTH (1 << 2)
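
A hedged sketch of consuming these fields at probe time; the FIFO-depth
encoding (a power of two starting at 2) is an assumption about the IP, not
something this patch states:

    u32 comp1 = i2s_read_reg(dev->i2s_base, dev->i2s_reg_comp1);
    bool has_tx = COMP1_TX_ENABLED(comp1);
    bool has_rx = COMP1_RX_ENABLED(comp1);
    /* Assumed encoding: 0 -> 2 entries, 1 -> 4, 2 -> 8, 3 -> 16. */
    u32 fifo_depth = 1 << (1 + COMP1_FIFO_DEPTH_GLOBAL(comp1));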
+
+#define MAX_CHANNEL_NUM 8
+#define MIN_CHANNEL_NUM 2
+
+union dw_i2s_snd_dma_data {
+ struct i2s_dma_data pd;
+ struct snd_dmaengine_dai_dma_data dt;
+};
+
+struct dw_i2s_dev {
+ void __iomem *i2s_base;
+ struct clk *clk;
+ int active;
+ unsigned int capability;
+ unsigned int quirks;
+ unsigned int i2s_reg_comp1;
+ unsigned int i2s_reg_comp2;
+ struct device *dev;
+ u32 ccr;
+ u32 xfer_resolution;
+ u32 fifo_th;
+
+ /* data related to DMA transfers b/w i2s and DMAC */
+ union dw_i2s_snd_dma_data play_dma_data;
+ union dw_i2s_snd_dma_data capture_dma_data;
+ struct i2s_clk_config_data config;
+ int (*i2s_clk_cfg)(struct i2s_clk_config_data *config);
+
+ /* data related to PIO transfers (TX) */
+ bool use_pio;
+ struct snd_pcm_substream __rcu *tx_substream;
+ unsigned int (*tx_fn)(struct dw_i2s_dev *dev,
+ struct snd_pcm_runtime *runtime, unsigned int tx_ptr,
+ bool *period_elapsed);
+ unsigned int tx_ptr;
+};
+
+#if IS_ENABLED(CONFIG_SND_DESIGNWARE_PCM)
+void dw_pcm_push_tx(struct dw_i2s_dev *dev);
+int dw_pcm_register(struct platform_device *pdev);
+#else
+static inline void dw_pcm_push_tx(struct dw_i2s_dev *dev) { }
+static inline int dw_pcm_register(struct platform_device *pdev)
+{
+	return -EINVAL;
+}
+#endif
+
+#endif
config SND_SOC_FSL_ASRC
tristate "Asynchronous Sample Rate Converter (ASRC) module support"
+ depends on HAS_DMA
select REGMAP_MMIO
select SND_SOC_GENERIC_DMAENGINE_PCM
help
if (*pos >= size * 2) {
*pos = 0;
} else if (unlikely((*pos % size) + 3 > size)) {
- dev_err(&pdev->dev, "User bit receivce buffer overflow\n");
+ dev_err(&pdev->dev, "User bit receive buffer overflow\n");
return;
}
ssi_private->i2s_mode = CCSR_SSI_SCR_NET;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
+ regmap_update_bits(regs, CCSR_SSI_STCCR,
+ CCSR_SSI_SxCCR_DC_MASK,
+ CCSR_SSI_SxCCR_DC(2));
+ regmap_update_bits(regs, CCSR_SSI_SRCCR,
+ CCSR_SSI_SxCCR_DC_MASK,
+ CCSR_SSI_SxCCR_DC(2));
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFS:
case SND_SOC_DAIFMT_CBS_CFS:
ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
- regmap_update_bits(regs, CCSR_SSI_STCCR,
- CCSR_SSI_SxCCR_DC_MASK,
- CCSR_SSI_SxCCR_DC(2));
- regmap_update_bits(regs, CCSR_SSI_SRCCR,
- CCSR_SSI_SxCCR_DC_MASK,
- CCSR_SSI_SxCCR_DC(2));
break;
case SND_SOC_DAIFMT_CBM_CFM:
ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
+config SND_SIMPLE_CARD_UTILS
+ tristate
+
config SND_SIMPLE_CARD
tristate "ASoC Simple sound card support"
+ select SND_SIMPLE_CARD_UTILS
help
This option enables generic simple sound card support
+obj-$(CONFIG_SND_SIMPLE_CARD_UTILS) += simple-card-utils.o
+
snd-soc-simple-card-objs := simple-card.o
obj-$(CONFIG_SND_SIMPLE_CARD) += snd-soc-simple-card.o
--- /dev/null
+/*
+ * simple-card-utils.c
+ *
+ * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/of.h>
+#include <sound/simple_card_utils.h>
+
+int asoc_simple_card_parse_daifmt(struct device *dev,
+ struct device_node *node,
+ struct device_node *codec,
+ char *prefix,
+ unsigned int *retfmt)
+{
+ struct device_node *bitclkmaster = NULL;
+ struct device_node *framemaster = NULL;
+ int prefix_len = prefix ? strlen(prefix) : 0;
+ unsigned int daifmt;
+
+ daifmt = snd_soc_of_parse_daifmt(node, prefix,
+ &bitclkmaster, &framemaster);
+ daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
+
+ if (prefix_len && !bitclkmaster && !framemaster) {
+ /*
+		 * No master setting was found at the dai-link or sound-node
+		 * level; revert to legacy DT parsing and take the settings
+		 * from the codec node.
+ */
+ dev_dbg(dev, "Revert to legacy daifmt parsing\n");
+
+ daifmt = snd_soc_of_parse_daifmt(codec, NULL, NULL, NULL) |
+ (daifmt & ~SND_SOC_DAIFMT_CLOCK_MASK);
+ } else {
+ if (codec == bitclkmaster)
+ daifmt |= (codec == framemaster) ?
+ SND_SOC_DAIFMT_CBM_CFM : SND_SOC_DAIFMT_CBM_CFS;
+ else
+ daifmt |= (codec == framemaster) ?
+ SND_SOC_DAIFMT_CBS_CFM : SND_SOC_DAIFMT_CBS_CFS;
+ }
+
+ of_node_put(bitclkmaster);
+ of_node_put(framemaster);
+
+ *retfmt = daifmt;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_parse_daifmt);
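
The call site added later in this patch shows the intended use; the helper
fills in dai_link->dai_fmt straight from the DT node:

    ret = asoc_simple_card_parse_daifmt(dev, node, codec,
                                        prefix, &dai_link->dai_fmt);
    if (ret < 0)
        goto dai_link_of_err;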
+
+int asoc_simple_card_set_dailink_name(struct device *dev,
+ struct snd_soc_dai_link *dai_link,
+ const char *fmt, ...)
+{
+ va_list ap;
+ char *name = NULL;
+ int ret = -ENOMEM;
+
+ va_start(ap, fmt);
+ name = devm_kvasprintf(dev, GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ if (name) {
+ ret = 0;
+
+ dai_link->name = name;
+ dai_link->stream_name = name;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_set_dailink_name);
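
Usage from the simple-card conversion below; because the name is devm-allocated
against dev, no explicit free is needed:

    ret = asoc_simple_card_set_dailink_name(dev, dai_link, "%s-%s",
                                            dai_link->cpu_dai_name,
                                            dai_link->codec_dai_name);
    if (ret < 0)
        goto dai_link_of_err;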
+
+int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
+ char *prefix)
+{
+ char prop[128];
+ int ret;
+
+ snprintf(prop, sizeof(prop), "%sname", prefix);
+
+ /* Parse the card name from DT */
+ ret = snd_soc_of_parse_card_name(card, prop);
+ if (ret < 0)
+ return ret;
+
+ if (!card->name && card->dai_link)
+ card->name = card->dai_link->name;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_parse_card_name);
#include <sound/soc-dai.h>
#include <sound/soc.h>
+struct asoc_simple_jack {
+ struct snd_soc_jack jack;
+ struct snd_soc_jack_pin pin;
+ struct snd_soc_jack_gpio gpio;
+};
+
struct simple_card_data {
struct snd_soc_card snd_card;
struct simple_dai_props {
unsigned int mclk_fs;
} *dai_props;
unsigned int mclk_fs;
- int gpio_hp_det;
- int gpio_hp_det_invert;
- int gpio_mic_det;
- int gpio_mic_det_invert;
+ struct asoc_simple_jack hp_jack;
+ struct asoc_simple_jack mic_jack;
struct snd_soc_dai_link dai_link[]; /* dynamically allocated */
};
#define simple_priv_to_link(priv, i) ((priv)->snd_card.dai_link + i)
#define simple_priv_to_props(priv, i) ((priv)->dai_props + i)
+#define PREFIX "simple-audio-card,"
+
+#define asoc_simple_card_init_hp(card, sjack, prefix)\
+ asoc_simple_card_init_jack(card, sjack, 1, prefix)
+#define asoc_simple_card_init_mic(card, sjack, prefix)\
+ asoc_simple_card_init_jack(card, sjack, 0, prefix)
+static int asoc_simple_card_init_jack(struct snd_soc_card *card,
+ struct asoc_simple_jack *sjack,
+ int is_hp, char *prefix)
+{
+ struct device *dev = card->dev;
+ enum of_gpio_flags flags;
+ char prop[128];
+ char *pin_name;
+ char *gpio_name;
+ int mask;
+ int det;
+
+ sjack->gpio.gpio = -ENOENT;
+
+ if (is_hp) {
+ snprintf(prop, sizeof(prop), "%shp-det-gpio", prefix);
+ pin_name = "Headphones";
+ gpio_name = "Headphone detection";
+ mask = SND_JACK_HEADPHONE;
+ } else {
+ snprintf(prop, sizeof(prop), "%smic-det-gpio", prefix);
+ pin_name = "Mic Jack";
+ gpio_name = "Mic detection";
+ mask = SND_JACK_MICROPHONE;
+ }
+
+ det = of_get_named_gpio_flags(dev->of_node, prop, 0, &flags);
+ if (det == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (gpio_is_valid(det)) {
+ sjack->pin.pin = pin_name;
+ sjack->pin.mask = mask;
+
+ sjack->gpio.name = gpio_name;
+ sjack->gpio.report = mask;
+ sjack->gpio.gpio = det;
+ sjack->gpio.invert = !!(flags & OF_GPIO_ACTIVE_LOW);
+ sjack->gpio.debounce_time = 150;
+
+ snd_soc_card_jack_new(card, pin_name, mask,
+ &sjack->jack,
+ &sjack->pin, 1);
+
+ snd_soc_jack_add_gpios(&sjack->jack, 1,
+ &sjack->gpio);
+ }
+
+ return 0;
+}
+
+static void asoc_simple_card_remove_jack(struct asoc_simple_jack *sjack)
+{
+ if (gpio_is_valid(sjack->gpio.gpio))
+ snd_soc_jack_free_gpios(&sjack->jack, 1, &sjack->gpio);
+}
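
Usage from the DAI init callback later in this patch (note the mic call must
pass mic_jack, not hp_jack):

    ret = asoc_simple_card_init_hp(rtd->card, &priv->hp_jack, PREFIX);
    if (ret < 0)
        return ret;

    ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
    if (ret < 0)
        return ret;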
+
static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
.hw_params = asoc_simple_card_hw_params,
};
-static struct snd_soc_jack simple_card_hp_jack;
-static struct snd_soc_jack_pin simple_card_hp_jack_pins[] = {
- {
- .pin = "Headphones",
- .mask = SND_JACK_HEADPHONE,
- },
-};
-static struct snd_soc_jack_gpio simple_card_hp_jack_gpio = {
- .name = "Headphone detection",
- .report = SND_JACK_HEADPHONE,
- .debounce_time = 150,
-};
-
-static struct snd_soc_jack simple_card_mic_jack;
-static struct snd_soc_jack_pin simple_card_mic_jack_pins[] = {
- {
- .pin = "Mic Jack",
- .mask = SND_JACK_MICROPHONE,
- },
-};
-static struct snd_soc_jack_gpio simple_card_mic_jack_gpio = {
- .name = "Mic detection",
- .report = SND_JACK_MICROPHONE,
- .debounce_time = 150,
-};
-
static int __asoc_simple_card_dai_init(struct snd_soc_dai *dai,
struct asoc_simple_dai *set)
{
if (ret < 0)
return ret;
- if (gpio_is_valid(priv->gpio_hp_det)) {
- snd_soc_card_jack_new(rtd->card, "Headphones",
- SND_JACK_HEADPHONE,
- &simple_card_hp_jack,
- simple_card_hp_jack_pins,
- ARRAY_SIZE(simple_card_hp_jack_pins));
-
- simple_card_hp_jack_gpio.gpio = priv->gpio_hp_det;
- simple_card_hp_jack_gpio.invert = priv->gpio_hp_det_invert;
- snd_soc_jack_add_gpios(&simple_card_hp_jack, 1,
- &simple_card_hp_jack_gpio);
- }
+ ret = asoc_simple_card_init_hp(rtd->card, &priv->hp_jack, PREFIX);
+ if (ret < 0)
+ return ret;
+
+	ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
+ if (ret < 0)
+ return ret;
- if (gpio_is_valid(priv->gpio_mic_det)) {
- snd_soc_card_jack_new(rtd->card, "Mic Jack",
- SND_JACK_MICROPHONE,
- &simple_card_mic_jack,
- simple_card_mic_jack_pins,
- ARRAY_SIZE(simple_card_mic_jack_pins));
- simple_card_mic_jack_gpio.gpio = priv->gpio_mic_det;
- simple_card_mic_jack_gpio.invert = priv->gpio_mic_det_invert;
- snd_soc_jack_add_gpios(&simple_card_mic_jack, 1,
- &simple_card_mic_jack_gpio);
- }
return 0;
}
u32 val;
int ret;
+ if (!np)
+ return 0;
+
/*
* Get node via "sound-dai = <&phandle port>"
* it will be used as xxx_of_node on soc_bind_dai_link()
*args_count = args.args_count;
/* Get dai->name */
- ret = snd_soc_of_get_dai_name(np, name);
- if (ret < 0)
- return ret;
+ if (name) {
+ ret = snd_soc_of_get_dai_name(np, name);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!dai)
+ return 0;
/* Parse TDM slot */
ret = snd_soc_of_parse_tdm_slot(np, &dai->tx_slot_mask,
return 0;
}
-static int asoc_simple_card_parse_daifmt(struct device_node *node,
- struct simple_card_data *priv,
- struct device_node *codec,
- char *prefix, int idx)
-{
- struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, idx);
- struct device *dev = simple_priv_to_dev(priv);
- struct device_node *bitclkmaster = NULL;
- struct device_node *framemaster = NULL;
- unsigned int daifmt;
-
- daifmt = snd_soc_of_parse_daifmt(node, prefix,
- &bitclkmaster, &framemaster);
- daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
-
- if (strlen(prefix) && !bitclkmaster && !framemaster) {
- /*
- * No dai-link level and master setting was not found from
- * sound node level, revert back to legacy DT parsing and
- * take the settings from codec node.
- */
- dev_dbg(dev, "Revert to legacy daifmt parsing\n");
-
- daifmt = snd_soc_of_parse_daifmt(codec, NULL, NULL, NULL) |
- (daifmt & ~SND_SOC_DAIFMT_CLOCK_MASK);
- } else {
- if (codec == bitclkmaster)
- daifmt |= (codec == framemaster) ?
- SND_SOC_DAIFMT_CBM_CFM : SND_SOC_DAIFMT_CBM_CFS;
- else
- daifmt |= (codec == framemaster) ?
- SND_SOC_DAIFMT_CBS_CFM : SND_SOC_DAIFMT_CBS_CFS;
- }
-
- dai_link->dai_fmt = daifmt;
-
- of_node_put(bitclkmaster);
- of_node_put(framemaster);
-
- return 0;
-}
-
static int asoc_simple_card_dai_link_of(struct device_node *node,
struct simple_card_data *priv,
int idx,
struct device_node *cpu = NULL;
struct device_node *plat = NULL;
struct device_node *codec = NULL;
- char *name;
char prop[128];
char *prefix = "";
int ret, cpu_args;
/* For single DAI link & old style of DT node */
if (is_top_level_node)
- prefix = "simple-audio-card,";
+ prefix = PREFIX;
snprintf(prop, sizeof(prop), "%scpu", prefix);
cpu = of_get_child_by_name(node, prop);
goto dai_link_of_err;
}
- ret = asoc_simple_card_parse_daifmt(node, priv,
- codec, prefix, idx);
+ ret = asoc_simple_card_parse_daifmt(dev, node, codec,
+ prefix, &dai_link->dai_fmt);
if (ret < 0)
goto dai_link_of_err;
if (ret < 0)
goto dai_link_of_err;
+ ret = asoc_simple_card_sub_parse_of(plat, NULL,
+ &dai_link->platform_of_node,
+ NULL, NULL);
+ if (ret < 0)
+ goto dai_link_of_err;
+
if (!dai_link->cpu_dai_name || !dai_link->codec_dai_name) {
ret = -EINVAL;
goto dai_link_of_err;
}
- if (plat) {
- struct of_phandle_args args;
-
- ret = of_parse_phandle_with_args(plat, "sound-dai",
- "#sound-dai-cells", 0, &args);
- dai_link->platform_of_node = args.np;
- } else {
- /* Assumes platform == cpu */
+ /* Assumes platform == cpu */
+ if (!dai_link->platform_of_node)
dai_link->platform_of_node = dai_link->cpu_of_node;
- }
- /* DAI link name is created from CPU/CODEC dai name */
- name = devm_kzalloc(dev,
- strlen(dai_link->cpu_dai_name) +
- strlen(dai_link->codec_dai_name) + 2,
- GFP_KERNEL);
- if (!name) {
- ret = -ENOMEM;
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "%s-%s",
+ dai_link->cpu_dai_name,
+ dai_link->codec_dai_name);
+ if (ret < 0)
goto dai_link_of_err;
- }
- sprintf(name, "%s-%s", dai_link->cpu_dai_name,
- dai_link->codec_dai_name);
- dai_link->name = dai_link->stream_name = name;
dai_link->ops = &asoc_simple_card_ops;
dai_link->init = asoc_simple_card_dai_init;
struct simple_card_data *priv)
{
struct device *dev = simple_priv_to_dev(priv);
- enum of_gpio_flags flags;
u32 val;
int ret;
if (!node)
return -EINVAL;
- /* Parse the card name from DT */
- snd_soc_of_parse_card_name(&priv->snd_card, "simple-audio-card,name");
-
/* The off-codec widgets */
- if (of_property_read_bool(node, "simple-audio-card,widgets")) {
+ if (of_property_read_bool(node, PREFIX "widgets")) {
ret = snd_soc_of_parse_audio_simple_widgets(&priv->snd_card,
- "simple-audio-card,widgets");
+ PREFIX "widgets");
if (ret)
return ret;
}
/* DAPM routes */
- if (of_property_read_bool(node, "simple-audio-card,routing")) {
+ if (of_property_read_bool(node, PREFIX "routing")) {
ret = snd_soc_of_parse_audio_routing(&priv->snd_card,
- "simple-audio-card,routing");
+ PREFIX "routing");
if (ret)
return ret;
}
/* Factor to mclk, used in hw_params() */
- ret = of_property_read_u32(node, "simple-audio-card,mclk-fs", &val);
+ ret = of_property_read_u32(node, PREFIX "mclk-fs", &val);
if (ret == 0)
priv->mclk_fs = val;
- dev_dbg(dev, "New simple-card: %s\n", priv->snd_card.name ?
- priv->snd_card.name : "");
-
/* Single/Muti DAI link(s) & New style of DT node */
- if (of_get_child_by_name(node, "simple-audio-card,dai-link")) {
+ if (of_get_child_by_name(node, PREFIX "dai-link")) {
struct device_node *np = NULL;
int i = 0;
return ret;
}
- priv->gpio_hp_det = of_get_named_gpio_flags(node,
- "simple-audio-card,hp-det-gpio", 0, &flags);
- priv->gpio_hp_det_invert = !!(flags & OF_GPIO_ACTIVE_LOW);
- if (priv->gpio_hp_det == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- priv->gpio_mic_det = of_get_named_gpio_flags(node,
- "simple-audio-card,mic-det-gpio", 0, &flags);
- priv->gpio_mic_det_invert = !!(flags & OF_GPIO_ACTIVE_LOW);
- if (priv->gpio_mic_det == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- if (!priv->snd_card.name)
- priv->snd_card.name = priv->snd_card.dai_link->name;
+ ret = asoc_simple_card_parse_card_name(&priv->snd_card, PREFIX);
+ if (ret)
+ return ret;
return 0;
}
int num_links, ret;
/* Get the number of DAI links */
- if (np && of_get_child_by_name(np, "simple-audio-card,dai-link"))
+ if (np && of_get_child_by_name(np, PREFIX "dai-link"))
num_links = of_get_child_count(np);
else
num_links = 1;
priv->snd_card.dai_link = dai_link;
priv->snd_card.num_links = num_links;
- priv->gpio_hp_det = -ENOENT;
- priv->gpio_mic_det = -ENOENT;
-
/* Get room for the other properties */
priv->dai_props = devm_kzalloc(dev,
sizeof(*priv->dai_props) * num_links,
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct simple_card_data *priv = snd_soc_card_get_drvdata(card);
- if (gpio_is_valid(priv->gpio_hp_det))
- snd_soc_jack_free_gpios(&simple_card_hp_jack, 1,
- &simple_card_hp_jack_gpio);
- if (gpio_is_valid(priv->gpio_mic_det))
- snd_soc_jack_free_gpios(&simple_card_mic_jack, 1,
- &simple_card_mic_jack_gpio);
+ asoc_simple_card_remove_jack(&priv->hp_jack);
+ asoc_simple_card_remove_jack(&priv->mic_jack);
return asoc_simple_card_unref(card);
}
help
This adds support for the ASoC machine driver for the Intel(R) MID Medfield
platform, used as an ALSA device in the audio subsystem on Intel(R) MID devices.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SST_MFLD_PLATFORM
tristate
select SND_SST_IPC
select SND_SOC_INTEL_SST
- depends on ACPI
config SND_SOC_INTEL_SST
tristate
select SND_SOC_INTEL_SST_MATCH if ACPI
depends on (X86 || COMPILE_TEST)
+# firmware stuff depends on DW_DMAC_CORE; since there is no depends-on from
+# the reverse selection, each machine driver needs to select
+# SND_SOC_INTEL_SST_FIRMWARE carefully, depending on DW_DMAC_CORE
+config SND_SOC_INTEL_SST_FIRMWARE
+ tristate
+
config SND_SOC_INTEL_SST_ACPI
tristate
config SND_SOC_INTEL_HASWELL_MACH
tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
- depends on DW_DMAC_CORE=y
+ depends on DW_DMAC_CORE
select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_FIRMWARE
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT5640
help
This adds support for the Lynxpoint Audio DSP on Intel(R) Haswell
Ultrabook platforms.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
+config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
+ tristate "ASoC Audio driver for Broxton with DA7219 and MAX98357A in I2S Mode"
+ depends on X86 && ACPI && I2C
+ select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SKYLAKE
+ select SND_SOC_DA7219
+ select SND_SOC_MAX98357A
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ select SND_HDA_DSP_LOADER
+ help
+ This adds support for ASoC machine driver for Broxton-P platforms
+ with DA7219 + MAX98357A I2S audio codec.
+ Say Y if you have such a device.
+ If unsure select "N".
+
config SND_SOC_INTEL_BXT_RT298_MACH
tristate "ASoC Audio driver for Broxton with RT298 I2S mode"
depends on X86 && ACPI && I2C
help
This adds support for ASoC machine driver for Broxton platforms
with the RT298 I2S audio codec.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_BYT_RT5640_MACH
tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
depends on X86_INTEL_LPSS && I2C
- depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n)
+ depends on DW_DMAC_CORE && (SND_SST_IPC_ACPI = n)
select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_FIRMWARE
select SND_SOC_INTEL_BAYTRAIL
select SND_SOC_RT5640
help
This adds audio driver for Intel Baytrail platform based boards
with the RT5640 audio codec. This driver is deprecated, use
- SND_SOC_INTEL_BYTCR_RT5640_MACH instead for better functionality
+ SND_SOC_INTEL_BYTCR_RT5640_MACH instead for better functionality.
config SND_SOC_INTEL_BYT_MAX98090_MACH
tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
depends on X86_INTEL_LPSS && I2C
- depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n)
+ depends on DW_DMAC_CORE && (SND_SST_IPC_ACPI = n)
select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_FIRMWARE
select SND_SOC_INTEL_BAYTRAIL
select SND_SOC_MAX98090
help
tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
depends on X86_INTEL_LPSS && I2C && DW_DMAC && \
I2C_DESIGNWARE_PLATFORM
- depends on DW_DMAC_CORE=y
+ depends on DW_DMAC_CORE
select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_FIRMWARE
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT286
help
This adds support for the Wildcatpoint Audio DSP on Intel(R) Broadwell
Ultrabook platforms.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_BYTCR_RT5640_MACH
tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5640 codec"
- depends on X86 && I2C
+ depends on X86 && I2C && ACPI
select SND_SOC_RT5640
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
help
This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
platforms with RT5640 audio codec.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_BYTCR_RT5651_MACH
tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5651 codec"
- depends on X86 && I2C
+ depends on X86 && I2C && ACPI
select SND_SOC_RT5651
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
help
This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
platforms with RT5651 audio codec.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
- depends on X86_INTEL_LPSS && I2C
+ depends on X86_INTEL_LPSS && I2C && ACPI
select SND_SOC_RT5670
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
help
This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
platforms with RT5672 audio codec.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5645/5650 codec"
- depends on X86_INTEL_LPSS && I2C
+ depends on X86_INTEL_LPSS && I2C && ACPI
select SND_SOC_RT5645
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
config SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH
tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with MAX98090 & TI codec"
- depends on X86_INTEL_LPSS && I2C
+ depends on X86_INTEL_LPSS && I2C && ACPI
select SND_SOC_MAX98090
select SND_SOC_TS3A227E
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
select SND_SOC_INTEL_SST_MATCH if ACPI
help
- This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
- platforms with MAX98090 audio codec it also can support TI jack chip as aux device.
- If unsure select "N".
+ This adds support for the ASoC machine driver for Intel(R) Cherrytrail & Braswell
+ platforms with the MAX98090 audio codec; it can also support the TI jack chip as an aux device.
+ If unsure select "N".
config SND_SOC_INTEL_SKYLAKE
tristate
help
This adds support for ASoC machine driver for Skylake platforms
with RT286 I2S audio codec.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH
help
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for NAU88L25 + SSM4567.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH
help
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for NAU88L25 + MAX98357A.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
case SNDRV_PCM_TRIGGER_START:
if (stream->compr_ops->stream_start)
return stream->compr_ops->stream_start(sst->dev, stream->id);
+ break;
case SNDRV_PCM_TRIGGER_STOP:
if (stream->compr_ops->stream_drop)
return stream->compr_ops->stream_drop(sst->dev, stream->id);
+ break;
case SND_COMPR_TRIGGER_DRAIN:
if (stream->compr_ops->stream_drain)
return stream->compr_ops->stream_drain(sst->dev, stream->id);
+ break;
case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
if (stream->compr_ops->stream_partial_drain)
return stream->compr_ops->stream_partial_drain(sst->dev, stream->id);
+ break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
if (stream->compr_ops->stream_pause)
return stream->compr_ops->stream_pause(sst->dev, stream->id);
+ break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
if (stream->compr_ops->stream_pause_release)
return stream->compr_ops->stream_pause_release(sst->dev, stream->id);
- default:
- return -EINVAL;
+ break;
}
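+ /* unknown trigger command, or no matching compr op provided */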
+ return -EINVAL;
}
static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
+#include <linux/dmi.h>
#include <linux/acpi.h>
#include <asm/platform_sst_audio.h>
#include <sound/core.h>
dev_err(dev, "No matching machine driver found\n");
return -ENODEV;
}
+ if (mach->machine_quirk)
+ mach = mach->machine_quirk(mach);
+
pdata = mach->pdata;
ret = kstrtouint(id->id, 16, &dev_id);
return 0;
}
+static unsigned long cht_machine_id;
+
+#define CHT_SURFACE_MACH 1
+
+static int cht_surface_quirk_cb(const struct dmi_system_id *id)
+{
+ cht_machine_id = CHT_SURFACE_MACH;
+ return 1;
+}
+
+static const struct dmi_system_id cht_table[] = {
+ {
+ .callback = cht_surface_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ },
+ },
+ { }
+};
+
+static struct sst_acpi_mach cht_surface_mach = {
+ "10EC5640", "cht-bsw-rt5645", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
+ &chv_platform_data };
+
+static struct sst_acpi_mach *cht_quirk(void *arg)
+{
+ struct sst_acpi_mach *mach = arg;
+
+ dmi_check_system(cht_table);
+
+ if (cht_machine_id == CHT_SURFACE_MACH)
+ return &cht_surface_mach;
+ else
+ return mach;
+}
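A minimal sketch of how a further board override could be added to the same machinery; the table, callback shape, and DMI_MATCH usage mirror cht_surface_quirk_cb() above, while the vendor/product strings below are hypothetical placeholders, not a real match:

	static int cht_example_quirk_cb(const struct dmi_system_id *id)
	{
		cht_machine_id = CHT_SURFACE_MACH;	/* or a new machine id */
		return 1;
	}

	/* extra entry added to cht_table[] ahead of the { } terminator */
	{
		.callback = cht_example_quirk_cb,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
		},
	},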
+
static struct sst_acpi_mach sst_acpi_bytcr[] = {
{"10EC5640", "bytcr_rt5640", "intel/fw_sst_0f28.bin", "bytcr_rt5640", NULL,
&byt_rvp_platform_data },
{"193C9890", "cht-bsw-max98090", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
&chv_platform_data },
/* some CHT-T platforms rely on RT5640; use the Baytrail machine driver */
- {"10EC5640", "bytcr_rt5640", "intel/fw_sst_22a8.bin", "bytcr_rt5640", NULL,
+ {"10EC5640", "bytcr_rt5640", "intel/fw_sst_22a8.bin", "bytcr_rt5640", cht_quirk,
&chv_platform_data },
{},
snd-soc-sst-byt-rt5640-mach-objs := byt-rt5640.o
snd-soc-sst-byt-max98090-mach-objs := byt-max98090.o
snd-soc-sst-broadwell-objs := broadwell.o
+snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o
snd-soc-sst-bxt-rt298-objs := bxt_rt298.o
snd-soc-sst-bytcr-rt5640-objs := bytcr_rt5640.o
snd-soc-sst-bytcr-rt5651-objs := bytcr_rt5651.o
obj-$(CONFIG_SND_SOC_INTEL_HASWELL_MACH) += snd-soc-sst-haswell.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_RT5640_MACH) += snd-soc-sst-byt-rt5640-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH) += snd-soc-sst-byt-max98090-mach.o
+obj-$(CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH) += snd-soc-sst-bxt-da7219_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_RT298_MACH) += snd-soc-sst-bxt-rt298.o
obj-$(CONFIG_SND_SOC_INTEL_BROADWELL_MACH) += snd-soc-sst-broadwell.o
obj-$(CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH) += snd-soc-sst-bytcr-rt5640.o
--- /dev/null
+/*
+ * Intel Broxton-P I2S Machine Driver
+ *
+ * Copyright (C) 2016, Intel Corporation. All rights reserved.
+ *
+ * Modified from:
+ * Intel Skylake I2S Machine driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../../codecs/hdac_hdmi.h"
+#include "../../codecs/da7219.h"
+#include "../../codecs/da7219-aad.h"
+
+#define BXT_DIALOG_CODEC_DAI "da7219-hifi"
+#define BXT_MAXIM_CODEC_DAI "HiFi"
+#define DUAL_CHANNEL 2
+
+static struct snd_soc_jack broxton_headset;
+
+enum {
+ BXT_DPCM_AUDIO_PB = 0,
+ BXT_DPCM_AUDIO_CP,
+ BXT_DPCM_AUDIO_REF_CP,
+ BXT_DPCM_AUDIO_HDMI1_PB,
+ BXT_DPCM_AUDIO_HDMI2_PB,
+ BXT_DPCM_AUDIO_HDMI3_PB,
+};
+
+static const struct snd_kcontrol_new broxton_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Spk"),
+};
+
+static const struct snd_soc_dapm_widget broxton_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SPK("Spk", NULL),
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+ SND_SOC_DAPM_SPK("HDMI1", NULL),
+ SND_SOC_DAPM_SPK("HDMI2", NULL),
+ SND_SOC_DAPM_SPK("HDMI3", NULL),
+};
+
+static const struct snd_soc_dapm_route broxton_map[] = {
+ /* HP jack connectors - unknown if we have jack detection */
+ {"Headphone Jack", NULL, "HPL"},
+ {"Headphone Jack", NULL, "HPR"},
+
+ /* speaker */
+ {"Spk", NULL, "Speaker"},
+
+ /* other jacks */
+ {"MIC", NULL, "Headset Mic"},
+
+ /* digital mics */
+ {"DMic", NULL, "SoC DMIC"},
+
+ /* CODEC BE connections */
+ {"HiFi Playback", NULL, "ssp5 Tx"},
+ {"ssp5 Tx", NULL, "codec0_out"},
+
+ {"Playback", NULL, "ssp1 Tx"},
+ {"ssp1 Tx", NULL, "codec1_out"},
+
+ {"codec0_in", NULL, "ssp1 Rx"},
+ {"ssp1 Rx", NULL, "Capture"},
+
+ {"HDMI1", NULL, "hif5 Output"},
+ {"HDMI2", NULL, "hif6 Output"},
+ {"HDMI3", NULL, "hif7 Output"},
+
+ {"hifi3", NULL, "iDisp3 Tx"},
+ {"iDisp3 Tx", NULL, "iDisp3_out"},
+ {"hifi2", NULL, "iDisp2 Tx"},
+ {"iDisp2 Tx", NULL, "iDisp2_out"},
+ {"hifi1", NULL, "iDisp1 Tx"},
+ {"iDisp1 Tx", NULL, "iDisp1_out"},
+
+ /* DMIC */
+ {"dmic01_hifi", NULL, "DMIC01 Rx"},
+ {"DMIC01 Rx", NULL, "DMIC AIF"},
+};
+
+static int broxton_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = DUAL_CHANNEL;
+
+ /* set SSP to 24 bit */
+ snd_mask_none(fmt);
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+
+ return 0;
+}
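This fixup is attached to the SSP back-end links further down via .be_hw_params_fixup, so DPCM applies it whenever back-end parameters are fixed up:

	.be_hw_params_fixup = broxton_ssp_fixup,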
+
+static int broxton_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+ struct snd_soc_codec *codec = rtd->codec;
+
+ /*
+ * Headset buttons map to the Google Reference headset.
+ * These can be configured by userspace.
+ */
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3, &broxton_headset,
+ NULL, 0);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ da7219_aad_jack_det(codec, &broxton_headset);
+
+ snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "SoC DMIC");
+
+ return ret;
+}
+
+static int broxton_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dai *dai = rtd->codec_dai;
+
+ return hdac_hdmi_jack_init(dai, BXT_DPCM_AUDIO_HDMI1_PB + dai->id);
+}
+
+static int broxton_da7219_fe_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dapm_context *dapm;
+ struct snd_soc_component *component = rtd->cpu_dai->component;
+
+ dapm = snd_soc_component_get_dapm(component);
+ snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
+
+ return 0;
+}
+
+static unsigned int rates[] = {
+ 48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+};
+
+static unsigned int channels[] = {
+ DUAL_CHANNEL,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static int bxt_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /*
+ * On this platform the PCM device supports:
+ * 48kHz,
+ * stereo,
+ * 16-bit audio
+ */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+
+ return 0;
+}
+
+static const struct snd_soc_ops broxton_da7219_fe_ops = {
+ .startup = bxt_fe_startup,
+};
+
+static int broxton_da7219_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai,
+ DA7219_CLKSRC_MCLK, 19200000, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(codec_dai->dev, "can't set codec sysclk configuration\n");
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ DA7219_SYSCLK_PLL_SRM, 0, DA7219_PLL_FREQ_OUT_98304);
+ if (ret < 0) {
+ dev_err(codec_dai->dev, "failed to start PLL: %d\n", ret);
+ return -EIO;
+ }
+
+ return ret;
+}
+
+static int broxton_da7219_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ DA7219_SYSCLK_MCLK, 0, 0);
+ if (ret < 0) {
+ dev_err(codec_dai->dev, "failed to stop PLL: %d\n", ret);
+ return -EIO;
+ }
+
+ return ret;
+}
+
+static struct snd_soc_ops broxton_da7219_ops = {
+ .hw_params = broxton_da7219_hw_params,
+ .hw_free = broxton_da7219_hw_free,
+};
+
+/* broxton digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link broxton_dais[] = {
+ /* Front End DAI links */
+ [BXT_DPCM_AUDIO_PB]
+ {
+ .name = "Bxt Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:0e.0",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .init = broxton_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &broxton_da7219_fe_ops,
+ },
+ [BXT_DPCM_AUDIO_CP]
+ {
+ .name = "Bxt Audio Capture Port",
+ .stream_name = "Audio Record",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:0e.0",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ .ops = &broxton_da7219_fe_ops,
+ },
+ [BXT_DPCM_AUDIO_REF_CP]
+ {
+ .name = "Bxt Audio Reference cap",
+ .stream_name = "Refcap",
+ .cpu_dai_name = "Reference Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [BXT_DPCM_AUDIO_HDMI1_PB]
+ {
+ .name = "Bxt HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [BXT_DPCM_AUDIO_HDMI2_PB]
+ {
+ .name = "Bxt HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [BXT_DPCM_AUDIO_HDMI3_PB]
+ {
+ .name = "Bxt HDMI Port3",
+ .stream_name = "Hdmi3",
+ .cpu_dai_name = "HDMI3 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ /* Back End DAI links */
+ {
+ /* SSP5 - Codec */
+ .name = "SSP5-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP5 Pin",
+ .platform_name = "0000:00:0e.0",
+ .no_pcm = 1,
+ .codec_name = "MX98357A:00",
+ .codec_dai_name = BXT_MAXIM_CODEC_DAI,
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = broxton_ssp_fixup,
+ .dpcm_playback = 1,
+ },
+ {
+ /* SSP1 - Codec */
+ .name = "SSP1-Codec",
+ .id = 1,
+ .cpu_dai_name = "SSP1 Pin",
+ .platform_name = "0000:00:0e.0",
+ .no_pcm = 1,
+ .codec_name = "i2c-DLGS7219:00",
+ .codec_dai_name = BXT_DIALOG_CODEC_DAI,
+ .init = broxton_da7219_codec_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = broxton_ssp_fixup,
+ .ops = &broxton_da7219_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ {
+ .name = "dmic01",
+ .id = 2,
+ .cpu_dai_name = "DMIC01 Pin",
+ .codec_name = "dmic-codec",
+ .codec_dai_name = "dmic-hifi",
+ .platform_name = "0000:00:0e.0",
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 3,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:0e.0",
+ .init = broxton_hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 4,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:0e.0",
+ .init = broxton_hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 5,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .platform_name = "0000:00:0e.0",
+ .init = broxton_hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
+/* Broxton audio machine driver for DA7219 + MAX98357A */
+static struct snd_soc_card broxton_audio_card = {
+ .name = "bxtda7219max",
+ .owner = THIS_MODULE,
+ .dai_link = broxton_dais,
+ .num_links = ARRAY_SIZE(broxton_dais),
+ .controls = broxton_controls,
+ .num_controls = ARRAY_SIZE(broxton_controls),
+ .dapm_widgets = broxton_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(broxton_widgets),
+ .dapm_routes = broxton_map,
+ .num_dapm_routes = ARRAY_SIZE(broxton_map),
+ .fully_routed = true,
+};
+
+static int broxton_audio_probe(struct platform_device *pdev)
+{
+ broxton_audio_card.dev = &pdev->dev;
+ return devm_snd_soc_register_card(&pdev->dev, &broxton_audio_card);
+}
+
+static struct platform_driver broxton_audio = {
+ .probe = broxton_audio_probe,
+ .driver = {
+ .name = "bxt_da7219_max98357a_i2s",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+module_platform_driver(broxton_audio)
+
+/* Module information */
+MODULE_DESCRIPTION("Audio Machine driver-DA7219 & MAX98357A in I2S mode");
+MODULE_AUTHOR("Sathyanarayana Nujella <sathyanarayana.nujella@intel.com>");
+MODULE_AUTHOR("Rohit Ainapure <rohit.m.ainapure@intel.com>");
+MODULE_AUTHOR("Harsha Priya <harshapriya.n@intel.com>");
+MODULE_AUTHOR("Conrad Cooke <conrad.cooke@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:bxt_da7219_max98357a_i2s");
BXT_DPCM_AUDIO_PB = 0,
BXT_DPCM_AUDIO_CP,
BXT_DPCM_AUDIO_REF_CP,
+ BXT_DPCM_AUDIO_DMIC_CP,
BXT_DPCM_AUDIO_HDMI1_PB,
BXT_DPCM_AUDIO_HDMI2_PB,
BXT_DPCM_AUDIO_HDMI3_PB,
/* CODEC BE connections */
{ "AIF1 Playback", NULL, "ssp5 Tx"},
{ "ssp5 Tx", NULL, "codec0_out"},
+ { "ssp5 Tx", NULL, "codec1_out"},
{ "codec0_in", NULL, "ssp5 Rx" },
{ "ssp5 Rx", NULL, "AIF1 Capture" },
};
+static int broxton_rt298_fe_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dapm_context *dapm;
+ struct snd_soc_component *component = rtd->cpu_dai->component;
+
+ dapm = snd_soc_component_get_dapm(component);
+ snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
+
+ return 0;
+}
+
static int broxton_rt298_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
return ret;
rt298_mic_detect(codec, &broxton_headset);
+
+ snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "SoC DMIC");
+
return 0;
}
.hw_params = broxton_rt298_hw_params,
};
+static unsigned int rates[] = {
+ 48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+};
+
+static int broxton_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
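+ /* the DMIC back end runs at 2 or 4 channels; mirror the FE request */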
+ if (params_channels(params) == 2)
+ channels->min = channels->max = 2;
+ else
+ channels->min = channels->max = 4;
+
+ return 0;
+}
+
+static unsigned int channels_dmic[] = {
+ 2, 4,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
+ .count = ARRAY_SIZE(channels_dmic),
+ .list = channels_dmic,
+ .mask = 0,
+};
+
+static int broxton_dmic_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ runtime->hw.channels_max = 4;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_dmic_channels);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+}
+
+static struct snd_soc_ops broxton_dmic_ops = {
+ .startup = broxton_dmic_startup,
+};
+
+static unsigned int channels[] = {
+ 2,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static int bxt_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /*
+ * On this platform the PCM device supports:
+ * 48kHz,
+ * stereo
+ */
+
+ runtime->hw.channels_max = 2;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+
+ return 0;
+}
+
+static const struct snd_soc_ops broxton_rt286_fe_ops = {
+ .startup = bxt_fe_startup,
+};
+
/* broxton digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link broxton_rt298_dais[] = {
/* Front End DAI links */
.dynamic = 1,
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
+ .init = broxton_rt298_fe_init,
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
.dpcm_playback = 1,
+ .ops = &broxton_rt286_fe_ops,
},
[BXT_DPCM_AUDIO_CP]
{
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
.dpcm_capture = 1,
+ .ops = &broxton_rt286_fe_ops,
},
[BXT_DPCM_AUDIO_REF_CP]
{
.nonatomic = 1,
.dynamic = 1,
},
+ [BXT_DPCM_AUDIO_DMIC_CP]
+ {
+ .name = "Bxt Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &broxton_dmic_ops,
+ },
[BXT_DPCM_AUDIO_HDMI1_PB]
{
.name = "Bxt HDMI Port1",
.codec_name = "dmic-codec",
.codec_dai_name = "dmic-hifi",
.platform_name = "0000:00:0e.0",
+ .be_hw_params_fixup = broxton_dmic_fixup,
.ignore_suspend = 1,
.dpcm_capture = 1,
.no_pcm = 1,
.probe = broxton_audio_probe,
.driver = {
.name = "bxt_alc298s_i2s",
+ .pm = &snd_soc_pm_ops,
},
};
module_platform_driver(broxton_audio)
#include <sound/jack.h>
#include "../../codecs/rt5645.h"
#include "../atom/sst-atom-controls.h"
+#include "../common/sst-acpi.h"
#define CHT_PLAT_CLK_3_HZ 19200000
#define CHT_CODEC_DAI "rt5645-aif1"
};
static struct cht_acpi_card snd_soc_cards[] = {
+ {"10EC5640", CODEC_TYPE_RT5645, &snd_soc_card_chtrt5645},
{"10EC5645", CODEC_TYPE_RT5645, &snd_soc_card_chtrt5645},
{"10EC5650", CODEC_TYPE_RT5650, &snd_soc_card_chtrt5650},
};
+static char cht_rt5640_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+
static int snd_cht_mc_probe(struct platform_device *pdev)
{
int ret_val = 0;
struct cht_mc_private *drv;
struct snd_soc_card *card = snd_soc_cards[0].soc_card;
char codec_name[16];
+ struct sst_acpi_mach *mach;
+ const char *i2c_name = NULL;
+ int dai_index = 0;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_ATOMIC);
if (!drv)
}
}
card->dev = &pdev->dev;
+ mach = card->dev->platform_data;
sprintf(codec_name, "i2c-%s:00", drv->acpi_card->codec_id);
/* set correct codec name */
for (i = 0; i < ARRAY_SIZE(cht_dailink); i++)
- if (!strcmp(card->dai_link[i].codec_name, "i2c-10EC5645:00"))
+ if (!strcmp(card->dai_link[i].codec_name, "i2c-10EC5645:00")) {
card->dai_link[i].codec_name = kstrdup(codec_name, GFP_KERNEL);
+ dai_index = i;
+ }
+
+ /* fixup codec name based on HID */
+ i2c_name = sst_acpi_find_name_from_hid(mach->id);
+ if (i2c_name != NULL) {
+ snprintf(cht_rt5640_codec_name, sizeof(cht_rt5640_codec_name),
+ "%s%s", "i2c-", i2c_name);
+ cht_dailink[dai_index].codec_name = cht_rt5640_codec_name;
+ }
snd_soc_card_set_drvdata(card, drv);
ret_val = devm_snd_soc_register_card(&pdev->dev, card);
#include <sound/soc.h>
#include "../../codecs/nau8825.h"
#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
#define SKL_NUVOTON_CODEC_DAI "nau8825-hifi"
#define SKL_MAXIM_CODEC_DAI "HiFi"
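+/* last (largest) entry of a channel constraint list = max DMIC channels */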
+#define DMIC_CH(p) p->list[p->count-1]
static struct snd_soc_jack skylake_headset;
static struct snd_soc_card skylake_audio_card;
+static const struct snd_pcm_hw_constraint_list *dmic_constraints;
struct skl_hdmi_pcm {
struct list_head head;
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- if (params_channels(params) == 2)
+ if (params_channels(params) == 2 || DMIC_CH(dmic_constraints) == 2)
channels->min = channels->max = 2;
else
channels->min = channels->max = 4;
.mask = 0,
};
+static const unsigned int dmic_2ch[] = {
+ 2,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = {
+ .count = ARRAY_SIZE(dmic_2ch),
+ .list = dmic_2ch,
+ .mask = 0,
+};
+
static int skylake_dmic_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- runtime->hw.channels_max = 4;
+ runtime->hw.channels_max = DMIC_CH(dmic_constraints);
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- &constraints_dmic_channels);
+ dmic_constraints);
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
.list = rates_16000,
};
+static const unsigned int ch_mono[] = {
+ 1,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_refcap = {
+ .count = ARRAY_SIZE(ch_mono),
+ .list = ch_mono,
+};
+
static int skylake_refcap_startup(struct snd_pcm_substream *substream)
{
+ substream->runtime->hw.channels_max = 1;
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_refcap);
+
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&constraints_16000);
static int skylake_audio_probe(struct platform_device *pdev)
{
struct skl_nau8825_private *ctx;
+ struct skl_machine_pdata *pdata;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
if (!ctx)
skylake_audio_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&skylake_audio_card, ctx);
+ pdata = dev_get_drvdata(&pdev->dev);
+ if (pdata)
+ dmic_constraints = pdata->dmic_num == 2 ?
+ &constraints_dmic_2ch : &constraints_dmic_channels;
+
return devm_snd_soc_register_card(&pdev->dev, &skylake_audio_card);
}
+static const struct platform_device_id skl_board_ids[] = {
+ { .name = "skl_n88l25_m98357a" },
+ { .name = "kbl_n88l25_m98357a" },
+ { }
+};
+
static struct platform_driver skylake_audio = {
.probe = skylake_audio_probe,
.driver = {
- .name = "skl_nau88l25_max98357a_i2s",
+ .name = "skl_n88l25_m98357a",
.pm = &snd_soc_pm_ops,
},
+ .id_table = skl_board_ids,
};
module_platform_driver(skylake_audio)
MODULE_DESCRIPTION("Audio Machine driver-NAU88L25 & MAX98357A in I2S mode");
MODULE_AUTHOR("Rohit Ainapure <rohit.m.ainapure@intel.com");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:skl_nau88l25_max98357a_i2s");
+MODULE_ALIAS("platform:skl_n88l25_m98357a");
+MODULE_ALIAS("platform:kbl_n88l25_m98357a");
#include <sound/pcm_params.h>
#include "../../codecs/nau8825.h"
#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
#define SKL_NUVOTON_CODEC_DAI "nau8825-hifi"
#define SKL_SSM_CODEC_DAI "ssm4567-hifi"
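+/* last (largest) entry of a channel constraint list = max DMIC channels */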
+#define DMIC_CH(p) p->list[p->count-1]
static struct snd_soc_jack skylake_headset;
static struct snd_soc_card skylake_audio_card;
+static const struct snd_pcm_hw_constraint_list *dmic_constraints;
struct skl_hdmi_pcm {
struct list_head head;
{
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- if (params_channels(params) == 2)
+ if (params_channels(params) == 2 || DMIC_CH(dmic_constraints) == 2)
channels->min = channels->max = 2;
else
channels->min = channels->max = 4;
.mask = 0,
};
+static const unsigned int dmic_2ch[] = {
+ 2,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = {
+ .count = ARRAY_SIZE(dmic_2ch),
+ .list = dmic_2ch,
+ .mask = 0,
+};
+
static int skylake_dmic_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- runtime->hw.channels_max = 4;
+ runtime->hw.channels_max = DMIC_CH(dmic_constraints);
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- &constraints_dmic_channels);
+ dmic_constraints);
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
.list = rates_16000,
};
+static const unsigned int ch_mono[] = {
+ 1,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_refcap = {
+ .count = ARRAY_SIZE(ch_mono),
+ .list = ch_mono,
+};
+
static int skylake_refcap_startup(struct snd_pcm_substream *substream)
{
+ substream->runtime->hw.channels_max = 1;
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_refcap);
+
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&constraints_16000);
static int skylake_audio_probe(struct platform_device *pdev)
{
struct skl_nau88125_private *ctx;
+ struct skl_machine_pdata *pdata;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
if (!ctx)
skylake_audio_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&skylake_audio_card, ctx);
+ pdata = dev_get_drvdata(&pdev->dev);
+ if (pdata)
+ dmic_constraints = pdata->dmic_num == 2 ?
+ &constraints_dmic_2ch : &constraints_dmic_channels;
+
return devm_snd_soc_register_card(&pdev->dev, &skylake_audio_card);
}
+static const struct platform_device_id skl_board_ids[] = {
+ { .name = "skl_n88l25_s4567" },
+ { .name = "kbl_n88l25_s4567" },
+ { }
+};
+
static struct platform_driver skylake_audio = {
.probe = skylake_audio_probe,
.driver = {
- .name = "skl_nau88l25_ssm4567_i2s",
+ .name = "skl_n88l25_s4567",
.pm = &snd_soc_pm_ops,
},
+ .id_table = skl_board_ids,
};
module_platform_driver(skylake_audio)
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_DESCRIPTION("Intel Audio Machine driver for SKL with NAU88L25 and SSM4567 in I2S Mode");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:skl_nau88l25_ssm4567_i2s");
+MODULE_ALIAS("platform:skl_n88l25_s4567");
+MODULE_ALIAS("platform:kbl_n88l25_s4567");
return devm_snd_soc_register_card(&pdev->dev, &skylake_rt286);
}
+static const struct platform_device_id skl_board_ids[] = {
+ { .name = "skl_alc286s_i2s" },
+ { .name = "kbl_alc286s_i2s" },
+ { }
+};
+
static struct platform_driver skylake_audio = {
.probe = skylake_audio_probe,
.driver = {
.name = "skl_alc286s_i2s",
.pm = &snd_soc_pm_ops,
},
+ .id_table = skl_board_ids,
+
};
module_platform_driver(skylake_audio)
MODULE_DESCRIPTION("Intel SST Audio for Skylake");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:skl_alc286s_i2s");
+MODULE_ALIAS("platform:kbl_alc286s_i2s");
snd-soc-sst-acpi-objs := sst-acpi.o
snd-soc-sst-match-objs := sst-match-acpi.o
snd-soc-sst-ipc-objs := sst-ipc.o
-
-snd-soc-sst-dsp-$(CONFIG_DW_DMAC_CORE) += sst-firmware.o
+snd-soc-sst-firmware-objs := sst-firmware.o
obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o
obj-$(CONFIG_SND_SOC_INTEL_SST_MATCH) += snd-soc-sst-match.o
+obj-$(CONFIG_SND_SOC_INTEL_SST_FIRMWARE) += snd-soc-sst-firmware.o
#if IS_ENABLED(CONFIG_ACPI)
const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN]);
#else
-inline const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
+static inline const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
{
return NULL;
}
/* board name */
const char *board;
- void (*machine_quirk)(void);
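+ /* optional board quirk; may return a substitute machine table entry */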
+ struct sst_acpi_mach * (*machine_quirk)(void *arg);
void *pdata;
};
u32 index, void *private);
void sst_mem_block_unregister_all(struct sst_dsp *dsp);
-/* Create/Free DMA resources */
-int sst_dma_new(struct sst_dsp *sst);
-void sst_dma_free(struct sst_dma *dma);
-
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
enum sst_mem_type type);
#endif
}
reg = sst_dsp_shim_read_unlocked(ctx, offset);
- dev_info(ctx->dev, "FW Poll Status: reg=%#x %s %s\n", reg, operation,
+ dev_dbg(ctx->dev, "FW Poll Status: reg=%#x %s %s\n", reg, operation,
(time < timeout) ? "successful" : "timedout");
ret = time < timeout ? 0 : -ETIME;
}
EXPORT_SYMBOL_GPL(sst_dsp_inbox_read);
-#ifdef CONFIG_DW_DMAC_CORE
-struct sst_dsp *sst_dsp_new(struct device *dev,
- struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
-{
- struct sst_dsp *sst;
- int err;
-
- dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);
-
- sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
- if (sst == NULL)
- return NULL;
-
- spin_lock_init(&sst->spinlock);
- mutex_init(&sst->mutex);
- sst->dev = dev;
- sst->dma_dev = pdata->dma_dev;
- sst->thread_context = sst_dev->thread_context;
- sst->sst_dev = sst_dev;
- sst->id = pdata->id;
- sst->irq = pdata->irq;
- sst->ops = sst_dev->ops;
- sst->pdata = pdata;
- INIT_LIST_HEAD(&sst->used_block_list);
- INIT_LIST_HEAD(&sst->free_block_list);
- INIT_LIST_HEAD(&sst->module_list);
- INIT_LIST_HEAD(&sst->fw_list);
- INIT_LIST_HEAD(&sst->scratch_block_list);
-
- /* Initialise SST Audio DSP */
- if (sst->ops->init) {
- err = sst->ops->init(sst, pdata);
- if (err < 0)
- return NULL;
- }
-
- /* Register the ISR */
- err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
- sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
- if (err)
- goto irq_err;
-
- err = sst_dma_new(sst);
- if (err)
- dev_warn(dev, "sst_dma_new failed %d\n", err);
-
- return sst;
-
-irq_err:
- if (sst->ops->free)
- sst->ops->free(sst);
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(sst_dsp_new);
-
-void sst_dsp_free(struct sst_dsp *sst)
-{
- free_irq(sst->irq, sst);
- if (sst->ops->free)
- sst->ops->free(sst);
-
- sst_dma_free(sst->dma);
-}
-EXPORT_SYMBOL_GPL(sst_dsp_free);
-#endif
-
/* Module information */
MODULE_AUTHOR("Liam Girdwood");
MODULE_DESCRIPTION("Intel SST Core");
void *dsp;
};
-#ifdef CONFIG_DW_DMAC_CORE
+#if IS_ENABLED(CONFIG_DW_DMAC_CORE)
/* Initialization */
struct sst_dsp *sst_dsp_new(struct device *dev,
struct sst_dsp_device *sst_dev, struct sst_pdata *pdata);
}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);
+
+struct sst_dsp *sst_dsp_new(struct device *dev,
+ struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
+{
+ struct sst_dsp *sst;
+ int err;
+
+ dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);
+
+ sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
+ if (sst == NULL)
+ return NULL;
+
+ spin_lock_init(&sst->spinlock);
+ mutex_init(&sst->mutex);
+ sst->dev = dev;
+ sst->dma_dev = pdata->dma_dev;
+ sst->thread_context = sst_dev->thread_context;
+ sst->sst_dev = sst_dev;
+ sst->id = pdata->id;
+ sst->irq = pdata->irq;
+ sst->ops = sst_dev->ops;
+ sst->pdata = pdata;
+ INIT_LIST_HEAD(&sst->used_block_list);
+ INIT_LIST_HEAD(&sst->free_block_list);
+ INIT_LIST_HEAD(&sst->module_list);
+ INIT_LIST_HEAD(&sst->fw_list);
+ INIT_LIST_HEAD(&sst->scratch_block_list);
+
+ /* Initialise SST Audio DSP */
+ if (sst->ops->init) {
+ err = sst->ops->init(sst, pdata);
+ if (err < 0)
+ return NULL;
+ }
+
+ /* Register the ISR */
+ err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
+ sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
+ if (err)
+ goto irq_err;
+
+ err = sst_dma_new(sst);
+ if (err)
+ dev_warn(dev, "sst_dma_new failed %d\n", err);
+
+ return sst;
+
+irq_err:
+ if (sst->ops->free)
+ sst->ops->free(sst);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_new);
+
+void sst_dsp_free(struct sst_dsp *sst)
+{
+ free_irq(sst->irq, sst);
+ if (sst->ops->free)
+ sst->ops->free(sst);
+
+ sst_dma_free(sst->dma);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_free);
+
+MODULE_DESCRIPTION("Intel SST Firmware Loader");
+MODULE_LICENSE("GPL v2");
mutex_lock(&pcm_data->mutex);
pm_runtime_get_sync(pdata->dev);
- snd_soc_pcm_set_drvdata(rtd, pcm_data);
pcm_data->substream = substream;
snd_soc_set_runtime_hwparams(substream, &hsw_pcm_hardware);
# Skylake IPC Support
snd-soc-skl-ipc-objs := skl-sst-ipc.o skl-sst-dsp.o skl-sst-cldma.o \
- skl-sst.o bxt-sst.o
+ skl-sst.o bxt-sst.o skl-sst-utils.o
obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl-ipc.o
#define BXT_ADSP_SRAM1_BASE 0xA0000
+#define BXT_INSTANCE_ID 0
+#define BXT_BASE_FW_MODULE_ID 0
+
static unsigned int bxt_get_errorcode(struct sst_dsp *ctx)
{
return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE);
}
+/*
+ * The first boot sequence has some extra steps. Core 0 waits for the power
+ * status of core 1, so power up core 1 momentarily as well, keep it in
+ * reset/stall, and then turn it off
+ */
static int sst_bxt_prepare_fw(struct sst_dsp *ctx,
const void *fwdata, u32 fwsize)
{
u32 reg;
stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, fwsize, &ctx->dmab);
- if (stream_tag < 0) {
+ if (stream_tag <= 0) {
dev_err(ctx->dev, "Failed to prepare DMA FW loading err: %x\n",
stream_tag);
return stream_tag;
ctx->dsp_ops.stream_tag = stream_tag;
memcpy(ctx->dmab.area, fwdata, fwsize);
- /* Purge FW request */
+ /* Step 1: Power up core 0 and core 1 */
+ ret = skl_dsp_core_power_up(ctx, SKL_DSP_CORE0_MASK |
+ SKL_DSP_CORE_MASK(1));
+ if (ret < 0) {
+ dev_err(ctx->dev, "dsp core0/1 power up failed\n");
+ goto base_fw_load_failed;
+ }
+
+ /* Step 2: Purge FW request */
sst_dsp_shim_write(ctx, SKL_ADSP_REG_HIPCI, SKL_ADSP_REG_HIPCI_BUSY |
- BXT_IPC_PURGE_FW | (stream_tag - 1));
+ (BXT_IPC_PURGE_FW | ((stream_tag - 1) << 9)));
- ret = skl_dsp_enable_core(ctx);
+ /* Step 3: Unset core0 reset state & unstall/run core0 */
+ ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
- dev_err(ctx->dev, "Boot dsp core failed ret: %d\n", ret);
+ dev_err(ctx->dev, "Start dsp core failed ret: %d\n", ret);
ret = -EIO;
goto base_fw_load_failed;
}
+ /* Step 4: Wait for DONE Bit */
for (i = BXT_INIT_TIMEOUT; i > 0; --i) {
reg = sst_dsp_shim_read(ctx, SKL_ADSP_REG_HIPCIE);
SKL_ADSP_REG_HIPCIE_DONE);
}
- /* enable Interrupt */
+ /* Step 5: power down core 1 */
+ ret = skl_dsp_core_power_down(ctx, SKL_DSP_CORE_MASK(1));
+ if (ret < 0) {
+ dev_err(ctx->dev, "dsp core1 power down failed\n");
+ goto base_fw_load_failed;
+ }
+
+ /* Step 6: Enable Interrupt */
skl_ipc_int_enable(ctx);
skl_ipc_op_int_enable(ctx);
+ /* Step 7: Wait for ROM init */
for (i = BXT_INIT_TIMEOUT; i > 0; --i) {
if (SKL_FW_INIT ==
(sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS) &
base_fw_load_failed:
ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, stream_tag);
- skl_dsp_disable_core(ctx);
+ skl_dsp_core_power_down(ctx, SKL_DSP_CORE_MASK(1));
+ skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
return ret;
}
return ret;
}
+#define BXT_ADSP_FW_BIN_HDR_OFFSET 0x2000
+
static int bxt_load_base_firmware(struct sst_dsp *ctx)
{
- const struct firmware *fw = NULL;
+ struct firmware stripped_fw;
struct skl_sst *skl = ctx->thread_context;
int ret;
- ret = request_firmware(&fw, ctx->fw_name, ctx->dev);
+ ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "Request firmware failed %d\n", ret);
goto sst_load_base_firmware_failed;
}
- ret = sst_bxt_prepare_fw(ctx, fw->data, fw->size);
+ /* check for extended manifest */
+ if (ctx->fw == NULL)
+ goto sst_load_base_firmware_failed;
+
+ ret = snd_skl_parse_uuids(ctx, BXT_ADSP_FW_BIN_HDR_OFFSET);
+ if (ret < 0)
+ goto sst_load_base_firmware_failed;
+
+ stripped_fw.data = ctx->fw->data;
+ stripped_fw.size = ctx->fw->size;
+ skl_dsp_strip_extended_manifest(&stripped_fw);
+
+ ret = sst_bxt_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
/* Retry enabling the core and ROM load; retrying has been seen to help */
if (ret < 0) {
- ret = sst_bxt_prepare_fw(ctx, fw->data, fw->size);
+ ret = sst_bxt_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
if (ret < 0) {
+ dev_err(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
+ sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
+ sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
+
dev_err(ctx->dev, "Core En/ROM load fail:%d\n", ret);
goto sst_load_base_firmware_failed;
}
sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
- skl_dsp_disable_core(ctx);
+ skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
} else {
dev_dbg(ctx->dev, "Firmware download successful\n");
ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
if (ret == 0) {
dev_err(ctx->dev, "DSP boot fail, FW Ready timeout\n");
- skl_dsp_disable_core(ctx);
+ skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
ret = -EIO;
} else {
- skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
ret = 0;
+ skl->fw_loaded = true;
}
}
sst_load_base_firmware_failed:
- release_firmware(fw);
+ release_firmware(ctx->fw);
return ret;
}
-static int bxt_set_dsp_D0(struct sst_dsp *ctx)
+static int bxt_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
struct skl_sst *skl = ctx->thread_context;
int ret;
+ struct skl_ipc_dxstate_info dx;
+ unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
- skl->boot_complete = false;
-
- ret = skl_dsp_enable_core(ctx);
- if (ret < 0) {
- dev_err(ctx->dev, "enable dsp core failed ret: %d\n", ret);
+ if (skl->fw_loaded == false) {
+ skl->boot_complete = false;
+ ret = bxt_load_base_firmware(ctx);
+ if (ret < 0)
+ dev_err(ctx->dev, "reload fw failed: %d\n", ret);
return ret;
}
- /* enable interrupt */
- skl_ipc_int_enable(ctx);
- skl_ipc_op_int_enable(ctx);
+ /* If core 0 is being turned on, turn on core 1 as well */
+ if (core_id == SKL_DSP_CORE0_ID)
+ ret = skl_dsp_core_power_up(ctx, core_mask |
+ SKL_DSP_CORE_MASK(1));
+ else
+ ret = skl_dsp_core_power_up(ctx, core_mask);
+
+ if (ret < 0)
+ goto err;
+
+ if (core_id == SKL_DSP_CORE0_ID) {
+
+ /*
+ * Enable interrupt after SPA is set and before
+ * DSP is unstalled
+ */
+ skl_ipc_int_enable(ctx);
+ skl_ipc_op_int_enable(ctx);
+ skl->boot_complete = false;
+ }
- ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
- msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
- if (ret == 0) {
- dev_err(ctx->dev, "ipc: error DSP boot timeout\n");
- dev_err(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
- sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
- sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
- return -EIO;
+ ret = skl_dsp_start_core(ctx, core_mask);
+ if (ret < 0)
+ goto err;
+
+ if (core_id == SKL_DSP_CORE0_ID) {
+ ret = wait_event_timeout(skl->boot_wait,
+ skl->boot_complete,
+ msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
+
+ /* If core 1 was turned on for booting core 0, turn it off */
+ skl_dsp_core_power_down(ctx, SKL_DSP_CORE_MASK(1));
+ if (ret == 0) {
+ dev_err(ctx->dev, "%s: DSP boot timeout\n", __func__);
+ dev_err(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
+ sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
+ sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
+ dev_err(ctx->dev, "Failed to set core0 to D0 state\n");
+ ret = -EIO;
+ goto err;
+ }
+ }
+
+ /* Tell the FW that an additional core is now on */
+ if (core_id != SKL_DSP_CORE0_ID) {
+ dx.core_mask = core_mask;
+ dx.dx_mask = core_mask;
+
+ ret = skl_ipc_set_dx(&skl->ipc, BXT_INSTANCE_ID,
+ BXT_BASE_FW_MODULE_ID, &dx);
+ if (ret < 0) {
+ dev_err(ctx->dev, "IPC set_dx for core %d fail: %d\n",
+ core_id, ret);
+ goto err;
+ }
}
- skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+ skl->cores.state[core_id] = SKL_DSP_RUNNING;
return 0;
+err:
+ if (core_id == SKL_DSP_CORE0_ID)
+ core_mask |= SKL_DSP_CORE_MASK(1);
+ skl_dsp_disable_core(ctx, core_mask);
+
+ return ret;
}
-static int bxt_set_dsp_D3(struct sst_dsp *ctx)
+static int bxt_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
+ int ret;
struct skl_ipc_dxstate_info dx;
struct skl_sst *skl = ctx->thread_context;
- int ret = 0;
+ unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
- if (!is_skl_dsp_running(ctx))
- return ret;
-
- dx.core_mask = SKL_DSP_CORE0_MASK;
+ dx.core_mask = core_mask;
dx.dx_mask = SKL_IPC_D3_MASK;
- ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID,
- SKL_BASE_FW_MODULE_ID, &dx);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to set DSP to D3 state: %d\n", ret);
- return ret;
- }
+ dev_dbg(ctx->dev, "core mask=%x dx_mask=%x\n",
+ dx.core_mask, dx.dx_mask);
+
+ ret = skl_ipc_set_dx(&skl->ipc, BXT_INSTANCE_ID,
+ BXT_BASE_FW_MODULE_ID, &dx);
+ if (ret < 0)
+ dev_err(ctx->dev,
+ "Failed to set DSP to D3:core id = %d;Continue reset\n",
+ core_id);
- ret = skl_dsp_disable_core(ctx);
+ ret = skl_dsp_disable_core(ctx, core_mask);
if (ret < 0) {
- dev_err(ctx->dev, "disbale dsp core failed: %d\n", ret);
- ret = -EIO;
+ dev_err(ctx->dev, "Failed to disable core %d", ret);
+ return ret;
}
-
- skl_dsp_set_state_locked(ctx, SKL_DSP_RESET);
+ skl->cores.state[core_id] = SKL_DSP_RESET;
return 0;
}
skl->dev = dev;
skl_dev.thread_context = skl;
+ INIT_LIST_HEAD(&skl->uuid_list);
skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
if (!skl->dsp) {
sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
+ INIT_LIST_HEAD(&sst->module_list);
ret = skl_ipc_init(dev, skl);
if (ret)
return ret;
+ skl->cores.count = 2;
skl->boot_complete = false;
init_waitqueue_head(&skl->boot_wait);
return ret;
}
+ skl_dsp_init_core_state(sst);
+
if (dsp)
*dsp = skl;
void bxt_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
+ skl_freeup_uuid_list(ctx);
skl_ipc_free(&ctx->ipc);
ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
.init = skl_sst_dsp_init,
.cleanup = skl_sst_dsp_cleanup
},
+ {
+ .id = 0x9d71,
+ .loader_ops = skl_get_loader_ops,
+ .init = skl_sst_dsp_init,
+ .cleanup = skl_sst_dsp_cleanup
+ },
{
.id = 0x5a98,
.loader_ops = bxt_get_loader_ops,
dev_dbg(ctx->dev, "Module type=%d config size: %d bytes\n",
module_config->id.module_id, param_size);
- print_hex_dump(KERN_DEBUG, "Module params:", DUMP_PREFIX_OFFSET, 8, 4,
+ print_hex_dump_debug("Module params:", DUMP_PREFIX_OFFSET, 8, 4,
*param_data, param_size, false);
return 0;
}
dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
- /* If pipe is not started, do not try to stop the pipe in FW. */
+ /* If the pipe is started, stop it in the FW first. */
if (pipe->state > SKL_PIPE_STARTED) {
ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
if (ret < 0) {
}
pipe->state = SKL_PIPE_PAUSED;
- } else {
- /* If pipe was not created in FW, do not try to delete it */
- if (pipe->state < SKL_PIPE_CREATED)
- return 0;
+ }
- ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
- if (ret < 0)
- dev_err(ctx->dev, "Failed to delete pipeline\n");
+ /* If pipe was not created in FW, do not try to delete it */
+ if (pipe->state < SKL_PIPE_CREATED)
+ return 0;
- pipe->state = SKL_PIPE_INVALID;
+ ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Failed to delete pipeline\n");
+ return ret;
}
+ pipe->state = SKL_PIPE_INVALID;
+
return ret;
}
return ret;
}
- pipe->state = SKL_PIPE_CREATED;
+ pipe->state = SKL_PIPE_PAUSED;
+
+ return 0;
+}
+
+/*
+ * Reset the pipeline by sending a set-pipe-state IPC; this resets the DMA
+ * from the DSP side
+ */
+int skl_reset_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+ int ret;
+
+ /* If the pipe was not created in the FW, do not try to reset it */
+ if (pipe->state < SKL_PIPE_PAUSED)
+ return 0;
+
+ ret = skl_set_pipe_state(ctx, pipe, PPL_RESET);
+ if (ret < 0) {
+ dev_dbg(ctx->dev, "Failed to reset pipe ret=%d\n", ret);
+ return ret;
+ }
+
+ pipe->state = SKL_PIPE_RESET;
return 0;
}
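The call-site shape for this reset helper appears in the XRUN-recovery hunks later in this series; in sketch form:

	/* on re-prepare after an XRUN, put the FW pipe back into a clean state */
	if (mconfig && substream->runtime->status->state == SNDRV_PCM_STATE_XRUN)
		skl_reset_pipe(skl->skl_sst, mconfig->pipe);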
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
+#include <linux/pci.h>
#include "skl.h"
/* Unique identification for getting NHLT blobs */
return NULL;
}
+int skl_get_dmic_geo(struct skl *skl)
+{
+ struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
+ struct nhlt_endpoint *epnt;
+ struct nhlt_dmic_array_config *cfg;
+ struct device *dev = &skl->pci->dev;
+ unsigned int dmic_geo = 0;
+ u8 j;
+
+ epnt = (struct nhlt_endpoint *)nhlt->desc;
+
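+ /* NHLT endpoint descriptors are variable-length; advance by epnt->length */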
+ for (j = 0; j < nhlt->endpoint_count; j++) {
+ if (epnt->linktype == NHLT_LINK_DMIC) {
+ cfg = (struct nhlt_dmic_array_config *)
+ (epnt->config.caps);
+ switch (cfg->array_type) {
+ case NHLT_MIC_ARRAY_2CH_SMALL:
+ case NHLT_MIC_ARRAY_2CH_BIG:
+ dmic_geo |= MIC_ARRAY_2CH;
+ break;
+
+ case NHLT_MIC_ARRAY_4CH_1ST_GEOM:
+ case NHLT_MIC_ARRAY_4CH_L_SHAPED:
+ case NHLT_MIC_ARRAY_4CH_2ND_GEOM:
+ dmic_geo |= MIC_ARRAY_4CH;
+ break;
+
+ default:
+ dev_warn(dev, "undefined DMIC array_type 0x%x\n",
+ cfg->array_type);
+
+ }
+ }
+ epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
+ }
+
+ return dmic_geo;
+}
+
static void skl_nhlt_trim_space(struct skl *skl)
{
char *s = skl->tplg_name;
u64 length;
} __packed;
+#define MIC_ARRAY_2CH 2
+#define MIC_ARRAY_4CH 4
+
+struct nhlt_tdm_config {
+ u8 virtual_slot;
+ u8 config_type;
+} __packed;
+
+struct nhlt_dmic_array_config {
+ struct nhlt_tdm_config tdm_config;
+ u8 array_type;
+} __packed;
+
+enum {
+ NHLT_MIC_ARRAY_2CH_SMALL = 0xa,
+ NHLT_MIC_ARRAY_2CH_BIG = 0xb,
+ NHLT_MIC_ARRAY_4CH_1ST_GEOM = 0xc,
+ NHLT_MIC_ARRAY_4CH_L_SHAPED = 0xd,
+ NHLT_MIC_ARRAY_4CH_2ND_GEOM = 0xe,
+ NHLT_MIC_ARRAY_VENDOR_DEFINED = 0xf,
+};
+
#endif
struct snd_soc_dai *dai)
{
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+ struct skl *skl = get_skl_ctx(dai->dev);
unsigned int format_val;
int err;
+ struct skl_module_cfg *mconfig;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+ mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+
format_val = skl_get_format(substream, dai);
dev_dbg(dai->dev, "stream_tag=%d formatvalue=%d\n",
hdac_stream(stream)->stream_tag, format_val);
snd_hdac_stream_reset(hdac_stream(stream));
+ /* In case of XRUN recovery, reset the FW pipe to a clean state */
+ if (mconfig && (substream->runtime->status->state ==
+ SNDRV_PCM_STATE_XRUN))
+ skl_reset_pipe(skl->skl_sst, mconfig->pipe);
+
err = snd_hdac_stream_set_params(hdac_stream(stream), format_val);
if (err < 0)
return err;
struct skl_dma_params *dma_params;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct hdac_ext_link *link;
+ struct skl *skl = get_skl_ctx(dai->dev);
+ struct skl_module_cfg *mconfig = NULL;
dma_params = (struct skl_dma_params *)
snd_soc_dai_get_dma_data(codec_dai, substream);
snd_hdac_ext_link_stream_reset(link_dev);
+ /* In case of XRUN recovery, reset the FW pipe to a clean state */
+ mconfig = skl_tplg_be_get_cpr_module(dai, substream->stream);
+ if (mconfig && (substream->runtime->status->state ==
+ SNDRV_PCM_STATE_XRUN))
+ skl_reset_pipe(skl->skl_sst, mconfig->pipe);
+
snd_hdac_ext_link_stream_setup(link_dev, format_val);
snd_hdac_ext_link_set_stream_id(link, hdac_stream(link_dev)->stream_tag);
return 0;
}
-/* calculate runtime delay from LPIB */
-static int skl_get_delay_from_lpib(struct hdac_ext_bus *ebus,
- struct hdac_ext_stream *sstream,
- unsigned int pos)
-{
- struct hdac_bus *bus = ebus_to_hbus(ebus);
- struct hdac_stream *hstream = hdac_stream(sstream);
- struct snd_pcm_substream *substream = hstream->substream;
- int stream = substream->stream;
- unsigned int lpib_pos = snd_hdac_stream_get_pos_lpib(hstream);
- int delay;
-
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- delay = pos - lpib_pos;
- else
- delay = lpib_pos - pos;
-
- if (delay < 0) {
- if (delay >= hstream->delay_negative_threshold)
- delay = 0;
- else
- delay += hstream->bufsize;
- }
-
- if (hstream->bufsize == delay)
- delay = 0;
-
- if (delay >= hstream->period_bytes) {
- dev_info(bus->dev,
- "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
- delay, hstream->period_bytes);
- delay = 0;
- }
-
- return bytes_to_frames(substream->runtime, delay);
-}
-
-static unsigned int skl_get_position(struct hdac_ext_stream *hstream,
- int codec_delay)
+static snd_pcm_uframes_t skl_platform_pcm_pointer
+ (struct snd_pcm_substream *substream)
{
- struct hdac_stream *hstr = hdac_stream(hstream);
- struct snd_pcm_substream *substream = hstr->substream;
- struct hdac_ext_bus *ebus;
+ struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
unsigned int pos;
- int delay;
/* use the position buffer as default */
pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
if (pos >= hdac_stream(hstream)->bufsize)
pos = 0;
- if (substream->runtime) {
- ebus = get_bus_ctx(substream);
- delay = skl_get_delay_from_lpib(ebus, hstream, pos)
- + codec_delay;
- substream->runtime->delay += delay;
- }
-
- return pos;
-}
-
-static snd_pcm_uframes_t skl_platform_pcm_pointer
- (struct snd_pcm_substream *substream)
-{
- struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
-
- return bytes_to_frames(substream->runtime,
- skl_get_position(hstream, 0));
+ return bytes_to_frames(substream->runtime, pos);
}
static u64 skl_adjust_codec_delay(struct snd_pcm_substream *substream,
static int skl_platform_soc_probe(struct snd_soc_platform *platform)
{
struct hdac_ext_bus *ebus = dev_get_drvdata(platform->dev);
+ struct skl *skl = ebus_to_skl(ebus);
+ int ret;
- if (ebus->ppcap)
- return skl_tplg_init(platform, ebus);
+ if (ebus->ppcap) {
+ ret = skl_tplg_init(platform, ebus);
+ if (ret < 0) {
+ dev_err(platform->dev, "Failed to init topology!\n");
+ return ret;
+ }
+ skl->platform = platform;
+ }
return 0;
}
mutex_unlock(&ctx->mutex);
}
-static int skl_dsp_core_set_reset_state(struct sst_dsp *ctx)
+/*
+ * Initialize the core power state and usage counts. To be called after a
+ * successful first boot, at which point core 0 is running and the other
+ * cores are in reset
+ */
+void skl_dsp_init_core_state(struct sst_dsp *ctx)
+{
+ struct skl_sst *skl = ctx->thread_context;
+ int i;
+
+ skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;
+ skl->cores.usage_count[SKL_DSP_CORE0_ID] = 1;
+
+ for (i = SKL_DSP_CORE0_ID + 1; i < SKL_DSP_CORES_MAX; i++) {
+ skl->cores.state[i] = SKL_DSP_RESET;
+ skl->cores.usage_count[i] = 0;
+ }
+}
+
+/* Get the mask for all enabled cores */
+unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx)
+{
+ struct skl_sst *skl = ctx->thread_context;
+ unsigned int core_mask, en_cores_mask;
+ u32 val;
+
+ core_mask = SKL_DSP_CORES_MASK(skl->cores.count);
+
+ val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);
+
+ /* Cores having CPA bit set */
+ en_cores_mask = (val & SKL_ADSPCS_CPA_MASK(core_mask)) >>
+ SKL_ADSPCS_CPA_SHIFT;
+
+ /* And cores having CRST bit cleared */
+ en_cores_mask &= (~val & SKL_ADSPCS_CRST_MASK(core_mask)) >>
+ SKL_ADSPCS_CRST_SHIFT;
+
+ /* And cores having CSTALL bit cleared */
+ en_cores_mask &= (~val & SKL_ADSPCS_CSTALL_MASK(core_mask)) >>
+ SKL_ADSPCS_CSTALL_SHIFT;
+ en_cores_mask &= core_mask;
+
+ dev_dbg(ctx->dev, "DSP enabled cores mask = %x\n", en_cores_mask);
+
+ return en_cores_mask;
+}
+
+static int
+skl_dsp_core_set_reset_state(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx,
- SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK,
- SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK));
+ SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK(core_mask),
+ SKL_ADSPCS_CRST_MASK(core_mask));
/* poll with timeout to check if operation successful */
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_CRST_MASK,
- SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK),
+ SKL_ADSPCS_CRST_MASK(core_mask),
+ SKL_ADSPCS_CRST_MASK(core_mask),
SKL_DSP_RESET_TO,
"Set reset");
if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
- SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) !=
- SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) {
- dev_err(ctx->dev, "Set reset state failed\n");
+ SKL_ADSPCS_CRST_MASK(core_mask)) !=
+ SKL_ADSPCS_CRST_MASK(core_mask)) {
+ dev_err(ctx->dev, "Set reset state failed: core_mask %x\n",
+ core_mask);
ret = -EIO;
}
return ret;
}
-static int skl_dsp_core_unset_reset_state(struct sst_dsp *ctx)
+int skl_dsp_core_unset_reset_state(
+ struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_CRST_MASK, 0);
+ SKL_ADSPCS_CRST_MASK(core_mask), 0);
/* poll with timeout to check if operation successful */
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_CRST_MASK,
+ SKL_ADSPCS_CRST_MASK(core_mask),
0,
SKL_DSP_RESET_TO,
"Unset reset");
if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
- SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) != 0) {
- dev_err(ctx->dev, "Unset reset state failed\n");
+ SKL_ADSPCS_CRST_MASK(core_mask)) != 0) {
+ dev_err(ctx->dev, "Unset reset state failed: core_mask %x\n",
+ core_mask);
ret = -EIO;
}
return ret;
}
-static bool is_skl_dsp_core_enable(struct sst_dsp *ctx)
+static bool
+is_skl_dsp_core_enable(struct sst_dsp *ctx, unsigned int core_mask)
{
int val;
bool is_enable;
val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);
- is_enable = ((val & SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) &&
- (val & SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK)) &&
- !(val & SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) &&
- !(val & SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK)));
+ is_enable = ((val & SKL_ADSPCS_CPA_MASK(core_mask)) &&
+ (val & SKL_ADSPCS_SPA_MASK(core_mask)) &&
+ !(val & SKL_ADSPCS_CRST_MASK(core_mask)) &&
+ !(val & SKL_ADSPCS_CSTALL_MASK(core_mask)));
+
+ dev_dbg(ctx->dev, "DSP core(s) enabled? %d : core_mask %x\n",
+ is_enable, core_mask);
- dev_dbg(ctx->dev, "DSP core is enabled=%d\n", is_enable);
return is_enable;
}
-static int skl_dsp_reset_core(struct sst_dsp *ctx)
+static int skl_dsp_reset_core(struct sst_dsp *ctx, unsigned int core_mask)
{
/* stall core */
- sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
- sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
- SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
+ sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+ SKL_ADSPCS_CSTALL_MASK(core_mask),
+ SKL_ADSPCS_CSTALL_MASK(core_mask));
/* set reset state */
- return skl_dsp_core_set_reset_state(ctx);
+ return skl_dsp_core_set_reset_state(ctx, core_mask);
}
-static int skl_dsp_start_core(struct sst_dsp *ctx)
+int skl_dsp_start_core(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* unset reset state */
- ret = skl_dsp_core_unset_reset_state(ctx);
- if (ret < 0) {
- dev_dbg(ctx->dev, "dsp unset reset fails\n");
+ ret = skl_dsp_core_unset_reset_state(ctx, core_mask);
+ if (ret < 0)
return ret;
- }
/* run core */
- dev_dbg(ctx->dev, "run core...\n");
- sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
- sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
- ~SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
-
- if (!is_skl_dsp_core_enable(ctx)) {
- skl_dsp_reset_core(ctx);
- dev_err(ctx->dev, "DSP core enable failed\n");
+ dev_dbg(ctx->dev, "unstall/run core: core_mask = %x\n", core_mask);
+ sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+ SKL_ADSPCS_CSTALL_MASK(core_mask), 0);
+
+ if (!is_skl_dsp_core_enable(ctx, core_mask)) {
+ skl_dsp_reset_core(ctx, core_mask);
+ dev_err(ctx->dev, "DSP start core failed: core_mask %x\n",
+ core_mask);
ret = -EIO;
}
return ret;
}
-static int skl_dsp_core_power_up(struct sst_dsp *ctx)
+int skl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_SPA_MASK, SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK));
+ SKL_ADSPCS_SPA_MASK(core_mask),
+ SKL_ADSPCS_SPA_MASK(core_mask));
/* poll with timeout to check if operation successful */
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_CPA_MASK,
- SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK),
+ SKL_ADSPCS_CPA_MASK(core_mask),
+ SKL_ADSPCS_CPA_MASK(core_mask),
SKL_DSP_PU_TO,
"Power up");
if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
- SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) !=
- SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) {
- dev_err(ctx->dev, "DSP core power up failed\n");
+ SKL_ADSPCS_CPA_MASK(core_mask)) !=
+ SKL_ADSPCS_CPA_MASK(core_mask)) {
+ dev_err(ctx->dev, "DSP core power up failed: core_mask %x\n",
+ core_mask);
ret = -EIO;
}
return ret;
}
-static int skl_dsp_core_power_down(struct sst_dsp *ctx)
+int skl_dsp_core_power_down(struct sst_dsp *ctx, unsigned int core_mask)
{
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_SPA_MASK, 0);
+ SKL_ADSPCS_SPA_MASK(core_mask), 0);
/* poll with timeout to check if operation successful */
return sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_CPA_MASK,
+ SKL_ADSPCS_CPA_MASK(core_mask),
0,
SKL_DSP_PD_TO,
"Power down");
}
-int skl_dsp_enable_core(struct sst_dsp *ctx)
+int skl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* power up */
- ret = skl_dsp_core_power_up(ctx);
+ ret = skl_dsp_core_power_up(ctx, core_mask);
if (ret < 0) {
- dev_dbg(ctx->dev, "dsp core power up failed\n");
+ dev_err(ctx->dev, "dsp core power up failed: core_mask %x\n",
+ core_mask);
return ret;
}
- return skl_dsp_start_core(ctx);
+ return skl_dsp_start_core(ctx, core_mask);
}
-int skl_dsp_disable_core(struct sst_dsp *ctx)
+int skl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
- ret = skl_dsp_reset_core(ctx);
+ ret = skl_dsp_reset_core(ctx, core_mask);
if (ret < 0) {
- dev_err(ctx->dev, "dsp core reset failed\n");
+ dev_err(ctx->dev, "dsp core reset failed: core_mask %x\n",
+ core_mask);
return ret;
}
/* power down core*/
- ret = skl_dsp_core_power_down(ctx);
+ ret = skl_dsp_core_power_down(ctx, core_mask);
if (ret < 0) {
- dev_err(ctx->dev, "dsp core power down failed\n");
+ dev_err(ctx->dev, "dsp core power down fail mask %x: %d\n",
+ core_mask, ret);
return ret;
}
- if (is_skl_dsp_core_enable(ctx)) {
- dev_err(ctx->dev, "DSP core disable failed\n");
+ if (is_skl_dsp_core_enable(ctx, core_mask)) {
+ dev_err(ctx->dev, "dsp core disable fail mask %x: %d\n",
+ core_mask, ret);
ret = -EIO;
}
{
int ret;
- if (is_skl_dsp_core_enable(ctx)) {
- dev_dbg(ctx->dev, "dsp core is already enabled, so reset the dap core\n");
- ret = skl_dsp_reset_core(ctx);
+ if (is_skl_dsp_core_enable(ctx, SKL_DSP_CORE0_MASK)) {
+ ret = skl_dsp_reset_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
- dev_err(ctx->dev, "dsp reset failed\n");
+ dev_err(ctx->dev, "dsp core0 reset fail: %d\n", ret);
return ret;
}
- ret = skl_dsp_start_core(ctx);
+ ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
- dev_err(ctx->dev, "dsp start failed\n");
+ dev_err(ctx->dev, "dsp core0 start fail: %d\n", ret);
return ret;
}
} else {
- dev_dbg(ctx->dev, "disable and enable to make sure DSP is invalid state\n");
- ret = skl_dsp_disable_core(ctx);
-
+ ret = skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
- dev_err(ctx->dev, "dsp disable core failes\n");
+ dev_err(ctx->dev, "dsp core0 disable fail: %d\n", ret);
return ret;
}
- ret = skl_dsp_enable_core(ctx);
+ ret = skl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK);
}
return ret;
return result;
}
+/*
+ * skl_dsp_get_core/skl_dsp_put_core will be called inside DAPM context
+ * within the dapm mutex. Hence no separate lock is used.
+ */
+int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id)
+{
+ struct skl_sst *skl = ctx->thread_context;
+ int ret = 0;
+
+ if (core_id >= skl->cores.count) {
+ dev_err(ctx->dev, "invalid core id: %d\n", core_id);
+ return -EINVAL;
+ }
+
+ if (skl->cores.state[core_id] == SKL_DSP_RESET) {
+ ret = ctx->fw_ops.set_state_D0(ctx, core_id);
+ if (ret < 0) {
+ dev_err(ctx->dev, "unable to get core%d\n", core_id);
+ return ret;
+ }
+ }
+
+ skl->cores.usage_count[core_id]++;
+
+ dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
+ core_id, skl->cores.state[core_id],
+ skl->cores.usage_count[core_id]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(skl_dsp_get_core);
+
+int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id)
+{
+ struct skl_sst *skl = ctx->thread_context;
+ int ret = 0;
+
+ if (core_id >= skl->cores.count) {
+ dev_err(ctx->dev, "invalid core id: %d\n", core_id);
+ return -EINVAL;
+ }
+
+ if (--skl->cores.usage_count[core_id] == 0) {
+ ret = ctx->fw_ops.set_state_D3(ctx, core_id);
+ if (ret < 0) {
+ dev_err(ctx->dev, "unable to put core %d: %d\n",
+ core_id, ret);
+ skl->cores.usage_count[core_id]++;
+ }
+ }
+
+ dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
+ core_id, skl->cores.state[core_id],
+ skl->cores.usage_count[core_id]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(skl_dsp_put_core);
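
A hypothetical caller sketch (not part of the patch) showing the intended get/put pairing; the handler name and core id are illustrative:

/*
 * Illustrative only: bring up a secondary core while a widget is
 * powered, and release it on power-down so usage_count returns to
 * zero and the core can drop back to D3.
 */
static int example_core_event(struct sst_dsp *ctx, bool power_on)
{
	unsigned int core_id = 1;	/* any core other than core0 */

	if (power_on)
		return skl_dsp_get_core(ctx, core_id);

	return skl_dsp_put_core(ctx, core_id);
}
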
int skl_dsp_wake(struct sst_dsp *ctx)
{
- return ctx->fw_ops.set_state_D0(ctx);
+ return skl_dsp_get_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_wake);
int skl_dsp_sleep(struct sst_dsp *ctx)
{
- return ctx->fw_ops.set_state_D3(ctx);
+ return skl_dsp_put_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_sleep);
free_irq(dsp->irq, dsp);
skl_ipc_op_int_disable(dsp);
- skl_ipc_int_disable(dsp);
-
- skl_dsp_disable_core(dsp);
+ skl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK);
}
EXPORT_SYMBOL_GPL(skl_dsp_free);
#include <linux/interrupt.h>
#include <sound/memalloc.h>
#include "skl-sst-cldma.h"
+#include "skl-tplg-interface.h"
struct sst_dsp;
struct skl_sst;
#define SKL_ADSPIC_IPC 1
#define SKL_ADSPIS_IPC 1
+/* Core ID of core0 */
+#define SKL_DSP_CORE0_ID 0
+
+/* Mask for a given core index, c = 0.. number of supported cores - 1 */
+#define SKL_DSP_CORE_MASK(c) BIT(c)
+
+/*
+ * Core 0 mask = SKL_DSP_CORE_MASK(0); Defined separately
+ * since Core0 is primary core and it is used often
+ */
+#define SKL_DSP_CORE0_MASK BIT(0)
+
+/*
+ * Mask for a given number of cores
+ * nc = number of supported cores
+ */
+#define SKL_DSP_CORES_MASK(nc) GENMASK((nc - 1), 0)
+
/* ADSPCS - Audio DSP Control & Status */
-#define SKL_DSP_CORES 1
-#define SKL_DSP_CORE0_MASK 1
-#define SKL_DSP_CORES_MASK ((1 << SKL_DSP_CORES) - 1)
-
-/* Core Reset - asserted high */
-#define SKL_ADSPCS_CRST_SHIFT 0
-#define SKL_ADSPCS_CRST_MASK (SKL_DSP_CORES_MASK << SKL_ADSPCS_CRST_SHIFT)
-#define SKL_ADSPCS_CRST(x) ((x << SKL_ADSPCS_CRST_SHIFT) & SKL_ADSPCS_CRST_MASK)
-
-/* Core run/stall - when set to '1' core is stalled */
-#define SKL_ADSPCS_CSTALL_SHIFT 8
-#define SKL_ADSPCS_CSTALL_MASK (SKL_DSP_CORES_MASK << \
- SKL_ADSPCS_CSTALL_SHIFT)
-#define SKL_ADSPCS_CSTALL(x) ((x << SKL_ADSPCS_CSTALL_SHIFT) & \
- SKL_ADSPCS_CSTALL_MASK)
-
-/* Set Power Active - when set to '1' turn cores on */
-#define SKL_ADSPCS_SPA_SHIFT 16
-#define SKL_ADSPCS_SPA_MASK (SKL_DSP_CORES_MASK << SKL_ADSPCS_SPA_SHIFT)
-#define SKL_ADSPCS_SPA(x) ((x << SKL_ADSPCS_SPA_SHIFT) & SKL_ADSPCS_SPA_MASK)
-
-/* Current Power Active - power status of cores, set by hardware */
-#define SKL_ADSPCS_CPA_SHIFT 24
-#define SKL_ADSPCS_CPA_MASK (SKL_DSP_CORES_MASK << SKL_ADSPCS_CPA_SHIFT)
-#define SKL_ADSPCS_CPA(x) ((x << SKL_ADSPCS_CPA_SHIFT) & SKL_ADSPCS_CPA_MASK)
-
-#define SST_DSP_POWER_D0 0x0 /* full On */
-#define SST_DSP_POWER_D3 0x3 /* Off */
+
+/*
+ * Core Reset - asserted high
+ * CRST Mask for a given core mask pattern, cm
+ */
+#define SKL_ADSPCS_CRST_SHIFT 0
+#define SKL_ADSPCS_CRST_MASK(cm) ((cm) << SKL_ADSPCS_CRST_SHIFT)
+
+/*
+ * Core run/stall - when set to '1' core is stalled
+ * CSTALL Mask for a given core mask pattern, cm
+ */
+#define SKL_ADSPCS_CSTALL_SHIFT 8
+#define SKL_ADSPCS_CSTALL_MASK(cm) ((cm) << SKL_ADSPCS_CSTALL_SHIFT)
+
+/*
+ * Set Power Active - when set to '1' turn cores on
+ * SPA Mask for a given core mask pattern, cm
+ */
+#define SKL_ADSPCS_SPA_SHIFT 16
+#define SKL_ADSPCS_SPA_MASK(cm) ((cm) << SKL_ADSPCS_SPA_SHIFT)
+
+/*
+ * Current Power Active - power status of cores, set by hardware
+ * CPA Mask for a given core mask pattern, cm
+ */
+#define SKL_ADSPCS_CPA_SHIFT 24
+#define SKL_ADSPCS_CPA_MASK(cm) ((cm) << SKL_ADSPCS_CPA_SHIFT)
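
A quick expansion check for the parameterized masks above; the standalone macros below merely mirror the kernel ones for illustration, and the values follow directly from the shifts:

#include <assert.h>

#define SPA_MASK(cm)	((cm) << 16)		/* mirrors SKL_ADSPCS_SPA_MASK() */
#define CPA_MASK(cm)	((cm) << 24)		/* mirrors SKL_ADSPCS_CPA_MASK() */
#define CORES_MASK(nc)	((1u << (nc)) - 1)	/* same value as GENMASK(nc - 1, 0) */

int main(void)
{
	assert(CORES_MASK(2) == 0x3);		/* two cores -> 0b11 */
	assert(SPA_MASK(0x1) == 0x00010000);	/* core0 only */
	assert(SPA_MASK(0x3) == 0x00030000);	/* cores 0 and 1 */
	assert(CPA_MASK(0x3) == 0x03000000);
	return 0;
}
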
enum skl_dsp_states {
SKL_DSP_RUNNING = 1,
int (*load_fw)(struct sst_dsp *ctx);
/* FW module parser/loader */
int (*parse_fw)(struct sst_dsp *ctx);
- int (*set_state_D0)(struct sst_dsp *ctx);
- int (*set_state_D3)(struct sst_dsp *ctx);
+ int (*set_state_D0)(struct sst_dsp *ctx, unsigned int core_id);
+ int (*set_state_D3)(struct sst_dsp *ctx, unsigned int core_id);
unsigned int (*get_fw_errcode)(struct sst_dsp *ctx);
int (*load_mod)(struct sst_dsp *ctx, u16 mod_id, u8 *mod_name);
int (*unload_mod)(struct sst_dsp *ctx, u16 mod_id);
void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
struct sst_dsp_device *sst_dev, int irq);
-int skl_dsp_enable_core(struct sst_dsp *ctx);
-int skl_dsp_disable_core(struct sst_dsp *ctx);
bool is_skl_dsp_running(struct sst_dsp *ctx);
+
+unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx);
+void skl_dsp_init_core_state(struct sst_dsp *ctx);
+int skl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask);
+int skl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask);
+int skl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask);
+int skl_dsp_core_power_down(struct sst_dsp *ctx, unsigned int core_mask);
+int skl_dsp_core_unset_reset_state(struct sst_dsp *ctx,
+ unsigned int core_mask);
+int skl_dsp_start_core(struct sst_dsp *ctx, unsigned int core_mask);
+
irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id);
int skl_dsp_wake(struct sst_dsp *ctx);
int skl_dsp_sleep(struct sst_dsp *ctx);
void skl_dsp_free(struct sst_dsp *dsp);
+int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id);
+int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id);
+
int skl_dsp_boot(struct sst_dsp *ctx);
int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
void bxt_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
+int snd_skl_get_module_info(struct skl_sst *ctx, u8 *uuid,
+ struct skl_dfw_module *dfw_config);
+int snd_skl_parse_uuids(struct sst_dsp *ctx, unsigned int offset);
+void skl_freeup_uuid_list(struct skl_sst *ctx);
+
+int skl_dsp_strip_extended_manifest(struct firmware *fw);
+
#endif /*__SKL_SST_DSP_H__*/
/* first process the header */
switch (reply) {
case IPC_GLB_REPLY_SUCCESS:
- dev_info(ipc->dev, "ipc FW reply %x: success\n", header.primary);
+ dev_dbg(ipc->dev, "ipc FW reply %x: success\n", header.primary);
/* copy the rx data from the mailbox */
sst_dsp_inbox_read(ipc->dsp, msg->rx_data, msg->rx_size);
break;
/* param_block_size must be in dwords */
u16 param_block_size = msg->param_data_size / sizeof(u32);
- print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE,
+ print_hex_dump_debug("Param data:", DUMP_PREFIX_NONE,
16, 4, buffer, param_block_size, false);
header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
u32 extension;
};
+#define SKL_DSP_CORES_MAX 2
+
+struct skl_dsp_cores {
+ unsigned int count;
+ enum skl_dsp_states state[SKL_DSP_CORES_MAX];
+ int usage_count[SKL_DSP_CORES_MAX];
+};
+
struct skl_sst {
struct device *dev;
struct sst_dsp *dsp;
void (*enable_miscbdcge)(struct device *dev, bool enable);
/*Is CGCTL.MISCBDCGE disabled*/
bool miscbdcg_disabled;
+
+ /* Populate module information */
+ struct list_head uuid_list;
+
+ /* Is firmware loaded */
+ bool fw_loaded;
+
+ /* multi-core */
+ struct skl_dsp_cores cores;
};
struct skl_ipc_init_instance_msg {
bool skl_ipc_int_status(struct sst_dsp *dsp);
void skl_ipc_free(struct sst_generic_ipc *ipc);
int skl_ipc_init(struct device *dev, struct skl_sst *skl);
+void skl_clear_module_cnt(struct sst_dsp *ctx);
#endif /* __SKL_IPC_H */
--- /dev/null
+/*
+ * skl-sst-utils.c - SKL sst utils functions
+ *
+ * Copyright (C) 2016 Intel Corp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include "skl-sst-dsp.h"
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "skl-sst-ipc.h"
+
+
+#define UUID_STR_SIZE 37
+#define DEFAULT_HASH_SHA256_LEN 32
+
+/* FW Extended Manifest Header id = $AE1 */
+#define SKL_EXT_MANIFEST_HEADER_MAGIC 0x31454124
+
+struct skl_dfw_module_mod {
+ char name[100];
+ struct skl_dfw_module skl_dfw_mod;
+};
+
+struct UUID {
+ u8 id[16];
+};
+
+union seg_flags {
+ u32 ul;
+ struct {
+ u32 contents : 1;
+ u32 alloc : 1;
+ u32 load : 1;
+ u32 read_only : 1;
+ u32 code : 1;
+ u32 data : 1;
+ u32 _rsvd0 : 2;
+ u32 type : 4;
+ u32 _rsvd1 : 4;
+ u32 length : 16;
+ } r;
+} __packed;
+
+struct segment_desc {
+ union seg_flags flags;
+ u32 v_base_addr;
+ u32 file_offset;
+};
+
+struct module_type {
+ u32 load_type : 4;
+ u32 auto_start : 1;
+ u32 domain_ll : 1;
+ u32 domain_dp : 1;
+ u32 rsvd : 25;
+} __packed;
+
+struct adsp_module_entry {
+ u32 struct_id;
+ u8 name[8];
+ struct UUID uuid;
+ struct module_type type;
+ u8 hash1[DEFAULT_HASH_SHA256_LEN];
+ u32 entry_point;
+ u16 cfg_offset;
+ u16 cfg_count;
+ u32 affinity_mask;
+ u16 instance_max_count;
+ u16 instance_bss_size;
+ struct segment_desc segments[3];
+} __packed;
+
+struct adsp_fw_hdr {
+ u32 id;
+ u32 len;
+ u8 name[8];
+ u32 preload_page_count;
+ u32 fw_image_flags;
+ u32 feature_mask;
+ u16 major;
+ u16 minor;
+ u16 hotfix;
+ u16 build;
+ u32 num_modules;
+ u32 hw_buf_base;
+ u32 hw_buf_length;
+ u32 load_offset;
+} __packed;
+
+struct uuid_module {
+ uuid_le uuid;
+ int id;
+ int is_loadable;
+
+ struct list_head list;
+};
+
+struct skl_ext_manifest_hdr {
+ u32 id;
+ u32 len;
+ u16 version_major;
+ u16 version_minor;
+ u32 entries;
+};
+
+int snd_skl_get_module_info(struct skl_sst *ctx, u8 *uuid,
+ struct skl_dfw_module *dfw_config)
+{
+ struct uuid_module *module;
+ uuid_le *uuid_mod;
+
+ uuid_mod = (uuid_le *)uuid;
+
+ list_for_each_entry(module, &ctx->uuid_list, list) {
+ if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) {
+ dfw_config->module_id = module->id;
+ dfw_config->is_loadable = module->is_loadable;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_skl_get_module_info);
+
+/*
+ * Parse the firmware binary to get the UUID, module id
+ * and loadable flags
+ */
+int snd_skl_parse_uuids(struct sst_dsp *ctx, unsigned int offset)
+{
+ struct adsp_fw_hdr *adsp_hdr;
+ struct adsp_module_entry *mod_entry;
+ int i, num_entry;
+ uuid_le *uuid_bin;
+ const char *buf;
+ struct skl_sst *skl = ctx->thread_context;
+ struct uuid_module *module;
+ struct firmware stripped_fw;
+ unsigned int safe_file;
+
+ /* Get the FW pointer to derive ADSP header */
+ stripped_fw.data = ctx->fw->data;
+ stripped_fw.size = ctx->fw->size;
+
+ skl_dsp_strip_extended_manifest(&stripped_fw);
+
+ buf = stripped_fw.data;
+
+ /* check if we have enough space in file to move to header */
+ safe_file = sizeof(*adsp_hdr) + offset;
+ if (stripped_fw.size <= safe_file) {
+ dev_err(ctx->dev, "Small fw file size, No space for hdr\n");
+ return -EINVAL;
+ }
+
+ adsp_hdr = (struct adsp_fw_hdr *)(buf + offset);
+
+ /* check 1st module entry is in file */
+ safe_file += adsp_hdr->len + sizeof(*mod_entry);
+ if (stripped_fw.size <= safe_file) {
+ dev_err(ctx->dev, "Small fw file size, No module entry\n");
+ return -EINVAL;
+ }
+
+ mod_entry = (struct adsp_module_entry *)
+ (buf + offset + adsp_hdr->len);
+
+ num_entry = adsp_hdr->num_modules;
+
+ /* check all entries are in file */
+ safe_file += num_entry * sizeof(*mod_entry);
+ if (stripped_fw.size <= safe_file) {
+ dev_err(ctx->dev, "Small fw file size, No modules\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Read the UUID(GUID) from FW Manifest.
+ *
+ * The 16 byte UUID format is: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+ * Populate the UUID table to store module_id and loadable flags
+ * for the module.
+ */
+
+ for (i = 0; i < num_entry; i++, mod_entry++) {
+ module = kzalloc(sizeof(*module), GFP_KERNEL);
+ if (!module)
+ return -ENOMEM;
+
+ uuid_bin = (uuid_le *)mod_entry->uuid.id;
+ memcpy(&module->uuid, uuid_bin, sizeof(module->uuid));
+
+ module->id = i;
+ module->is_loadable = mod_entry->type.load_type;
+
+ list_add_tail(&module->list, &skl->uuid_list);
+
+ dev_dbg(ctx->dev,
+ "Adding uuid :%pUL mod id: %d Loadable: %d\n",
+ &module->uuid, module->id, module->is_loadable);
+ }
+
+ return 0;
+}
+
+void skl_freeup_uuid_list(struct skl_sst *ctx)
+{
+ struct uuid_module *uuid, *_uuid;
+
+ list_for_each_entry_safe(uuid, _uuid, &ctx->uuid_list, list) {
+ list_del(&uuid->list);
+ kfree(uuid);
+ }
+}
+
+/*
+ * Some firmware binaries contain an extended manifest, which must be
+ * stripped from the image before the firmware is loaded and used.
+ */
+int skl_dsp_strip_extended_manifest(struct firmware *fw)
+{
+ struct skl_ext_manifest_hdr *hdr;
+
+ /* check if fw file is greater than header we are looking */
+ if (fw->size < sizeof(*hdr)) {
+ pr_err("%s: Firmware file small, no hdr\n", __func__);
+ return -EINVAL;
+ }
+
+ hdr = (struct skl_ext_manifest_hdr *)fw->data;
+
+ if (hdr->id == SKL_EXT_MANIFEST_HEADER_MAGIC) {
+ fw->size -= hdr->len;
+ fw->data += hdr->len;
+ }
+
+ return 0;
+}
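
The magic value checked above is just the ASCII string "$AE1" read as a little-endian u32; a standalone check, assuming a little-endian host:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t magic;

	memcpy(&magic, "$AE1", sizeof(magic));	/* '$'=0x24 'A'=0x41 'E'=0x45 '1'=0x31 */
	assert(magic == 0x31454124);		/* SKL_EXT_MANIFEST_HEADER_MAGIC */
	return 0;
}
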
return ret;
}
+#define SKL_ADSP_FW_BIN_HDR_OFFSET 0x284
+
static int skl_load_base_firmware(struct sst_dsp *ctx)
{
int ret = 0, i;
struct skl_sst *skl = ctx->thread_context;
+ struct firmware stripped_fw;
u32 reg;
skl->boot_complete = false;
ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "Request firmware failed %d\n", ret);
- skl_dsp_disable_core(ctx);
return -EIO;
}
}
+ ret = snd_skl_parse_uuids(ctx, SKL_ADSP_FW_BIN_HDR_OFFSET);
+ if (ret < 0) {
+ dev_err(ctx->dev,
+ "UUID parsing err: %d\n", ret);
+ release_firmware(ctx->fw);
+ skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
+ return ret;
+ }
+
+ /* check for extended manifest */
+ stripped_fw.data = ctx->fw->data;
+ stripped_fw.size = ctx->fw->size;
+
+ skl_dsp_strip_extended_manifest(&stripped_fw);
+
ret = skl_dsp_boot(ctx);
if (ret < 0) {
dev_err(ctx->dev, "Boot dsp core failed ret: %d", ret);
goto transfer_firmware_failed;
}
- ret = skl_transfer_firmware(ctx, ctx->fw->data, ctx->fw->size);
+ ret = skl_transfer_firmware(ctx, stripped_fw.data, stripped_fw.size);
if (ret < 0) {
dev_err(ctx->dev, "Transfer firmware failed%d\n", ret);
goto transfer_firmware_failed;
}
dev_dbg(ctx->dev, "Download firmware successful%d\n", ret);
- skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+ skl->fw_loaded = true;
}
return 0;
transfer_firmware_failed:
ctx->cl_dev.ops.cl_cleanup_controller(ctx);
skl_load_base_firmware_failed:
- skl_dsp_disable_core(ctx);
+ skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
release_firmware(ctx->fw);
ctx->fw = NULL;
return ret;
}
-static int skl_set_dsp_D0(struct sst_dsp *ctx)
+static int skl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
int ret;
+ struct skl_ipc_dxstate_info dx;
+ struct skl_sst *skl = ctx->thread_context;
+ unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
- ret = skl_load_base_firmware(ctx);
- if (ret < 0) {
- dev_err(ctx->dev, "unable to load firmware\n");
- return ret;
+ /* If core0 is being turned on, we need to load the FW */
+ if (core_id == SKL_DSP_CORE0_ID) {
+ ret = skl_load_base_firmware(ctx);
+ if (ret < 0) {
+ dev_err(ctx->dev, "unable to load firmware\n");
+ return ret;
+ }
}
- skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+ /*
+ * If any core other than core 0 is being moved to D0, enable the
+ * core and send the set dx IPC for the core.
+ */
+ if (core_id != SKL_DSP_CORE0_ID) {
+ ret = skl_dsp_enable_core(ctx, core_mask);
+ if (ret < 0)
+ return ret;
+
+ dx.core_mask = core_mask;
+ dx.dx_mask = core_mask;
+
+ ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID,
+ SKL_BASE_FW_MODULE_ID, &dx);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Failed to set dsp to D0: core id %d\n",
+ core_id);
+ skl_dsp_disable_core(ctx, core_mask);
+ }
+ }
+
+ skl->cores.state[core_id] = SKL_DSP_RUNNING;
return ret;
}
-static int skl_set_dsp_D3(struct sst_dsp *ctx)
+static int skl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
int ret;
struct skl_ipc_dxstate_info dx;
struct skl_sst *skl = ctx->thread_context;
+ unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
- dev_dbg(ctx->dev, "In %s:\n", __func__);
- mutex_lock(&ctx->mutex);
- if (!is_skl_dsp_running(ctx)) {
- mutex_unlock(&ctx->mutex);
- return 0;
- }
- mutex_unlock(&ctx->mutex);
-
- dx.core_mask = SKL_DSP_CORE0_MASK;
+ dx.core_mask = core_mask;
dx.dx_mask = SKL_IPC_D3_MASK;
+
ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID, SKL_BASE_FW_MODULE_ID, &dx);
if (ret < 0)
- dev_err(ctx->dev,
- "D3 request to FW failed, continuing reset: %d", ret);
-
- /* disable Interrupt */
- ctx->cl_dev.ops.cl_cleanup_controller(ctx);
- skl_cldma_int_disable(ctx);
- skl_ipc_op_int_disable(ctx);
- skl_ipc_int_disable(ctx);
-
- ret = skl_dsp_disable_core(ctx);
- if (ret < 0) {
- dev_err(ctx->dev, "disable dsp core failed ret: %d\n", ret);
- ret = -EIO;
+ dev_err(ctx->dev, "set Dx core %d fail: %d\n", core_id, ret);
+
+ if (core_id == SKL_DSP_CORE0_ID) {
+ /* disable Interrupt */
+ ctx->cl_dev.ops.cl_cleanup_controller(ctx);
+ skl_cldma_int_disable(ctx);
+ skl_ipc_op_int_disable(ctx);
+ skl_ipc_int_disable(ctx);
}
- skl_dsp_set_state_locked(ctx, SKL_DSP_RESET);
+ ret = skl_dsp_disable_core(ctx, core_mask);
+ if (ret < 0)
+ return ret;
+
+ skl->cores.state[core_id] = SKL_DSP_RESET;
return ret;
}
return ret;
}
+void skl_clear_module_cnt(struct sst_dsp *ctx)
+{
+ struct skl_module_table *module;
+
+ if (list_empty(&ctx->module_list))
+ return;
+
+ list_for_each_entry(module, &ctx->module_list, list) {
+ module->usage_cnt = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(skl_clear_module_cnt);
+
static void skl_clear_module_table(struct sst_dsp *ctx)
{
struct skl_module_table *module, *tmp;
skl->dev = dev;
skl_dev.thread_context = skl;
+ INIT_LIST_HEAD(&skl->uuid_list);
skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
if (!skl->dsp) {
if (ret)
return ret;
+ skl->cores.count = 2;
+
ret = sst->fw_ops.load_fw(sst);
if (ret < 0) {
dev_err(dev, "Load base fw failed : %d", ret);
goto cleanup;
}
+ skl_dsp_init_core_state(sst);
+
if (dsp)
*dsp = skl;
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
skl_clear_module_table(ctx->dsp);
+ skl_freeup_uuid_list(ctx);
skl_ipc_free(&ctx->ipc);
ctx->dsp->ops->free(ctx->dsp);
if (ctx->boot_complete) {
skl_dump_mconfig(ctx, m_cfg);
}
-/*
- * A pipe can have multiple modules, each of them will be a DAPM widget as
- * well. While managing a pipeline we need to get the list of all the
- * widgets in a pipelines, so this helper - skl_tplg_get_pipe_widget() helps
- * to get the SKL type widgets in that pipeline
- */
-static int skl_tplg_alloc_pipe_widget(struct device *dev,
- struct snd_soc_dapm_widget *w, struct skl_pipe *pipe)
-{
- struct skl_module_cfg *src_module = NULL;
- struct snd_soc_dapm_path *p = NULL;
- struct skl_pipe_module *p_module = NULL;
-
- p_module = devm_kzalloc(dev, sizeof(*p_module), GFP_KERNEL);
- if (!p_module)
- return -ENOMEM;
-
- p_module->w = w;
- list_add_tail(&p_module->node, &pipe->w_list);
-
- snd_soc_dapm_widget_for_each_sink_path(w, p) {
- if ((p->sink->priv == NULL)
- && (!is_skl_dsp_widget_type(w)))
- continue;
-
- if ((p->sink->priv != NULL) && p->connect
- && is_skl_dsp_widget_type(p->sink)) {
-
- src_module = p->sink->priv;
- if (pipe->ppl_id == src_module->pipe->ppl_id)
- skl_tplg_alloc_pipe_widget(dev,
- p->sink, pipe);
- }
- }
- return 0;
-}
-
/*
* some modules can have multiple params set from user control and
* need to be set after module is initialized. If set_param flag is
if (bc->set_params == SKL_PARAM_SET) {
ret = skl_set_module_params(ctx,
- (u32 *)bc->params, bc->max,
+ (u32 *)bc->params, bc->size,
bc->param_id, mconfig);
if (ret < 0)
return ret;
continue;
mconfig->formats_config.caps = (u32 *)&bc->params;
- mconfig->formats_config.caps_size = bc->max;
+ mconfig->formats_config.caps_size = bc->size;
break;
}
if (!skl_is_pipe_mcps_avail(skl, mconfig))
return -ENOMEM;
- skl_tplg_alloc_pipe_mcps(skl, mconfig);
-
if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
mconfig->id.module_id, mconfig->guid);
if (ret < 0)
return ret;
+ skl_tplg_alloc_pipe_mcps(skl, mconfig);
ret = skl_tplg_set_module_params(w, ctx);
if (ret < 0)
return ret;
if (!skl_is_pipe_mem_avail(skl, mconfig))
return -ENOMEM;
- skl_tplg_alloc_pipe_mem(skl, mconfig);
- skl_tplg_alloc_pipe_mcps(skl, mconfig);
-
/*
* Create a list of modules for pipe.
* This list contains modules from source to sink
if (ret < 0)
return ret;
- /*
- * we create a w_list of all widgets in that pipe. This list is not
- * freed on PMD event as widgets within a pipe are static. This
- * saves us cycles to get widgets in pipe every time.
- *
- * So if we have already initialized all the widgets of a pipeline
- * we skip, so check for list_empty and create the list if empty
- */
- if (list_empty(&s_pipe->w_list)) {
- ret = skl_tplg_alloc_pipe_widget(ctx->dev, w, s_pipe);
- if (ret < 0)
- return ret;
- }
+ skl_tplg_alloc_pipe_mem(skl, mconfig);
+ skl_tplg_alloc_pipe_mcps(skl, mconfig);
/* Init all pipe modules from source to sink */
ret = skl_tplg_init_pipe_modules(skl, s_pipe);
struct skl_pipe *s_pipe = mconfig->pipe;
int ret = 0;
+ if (s_pipe->state == SKL_PIPE_INVALID)
+ return -EINVAL;
+
skl_tplg_free_pipe_mcps(skl, mconfig);
skl_tplg_free_pipe_mem(skl, mconfig);
list_for_each_entry(w_module, &s_pipe->w_list, node) {
dst_module = w_module->w->priv;
- skl_tplg_free_pipe_mcps(skl, dst_module);
+ if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
+ skl_tplg_free_pipe_mcps(skl, dst_module);
if (src_module == NULL) {
src_module = dst_module;
continue;
if (w->power)
skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
- bc->max, bc->param_id, mconfig);
+ bc->size, bc->param_id, mconfig);
/* decrement size for TLV header */
size -= 2 * sizeof(u32);
struct skl *skl = get_skl_ctx(w->dapm->dev);
if (ac->params) {
+ if (size > ac->max)
+ return -EINVAL;
+
+ ac->size = size;
/*
* if the param_is is of type Vendor, firmware expects actual
* parameter id and size from the control.
if (w->power)
return skl_set_module_params(skl->skl_sst,
- (u32 *)ac->params, ac->max,
+ (u32 *)ac->params, ac->size,
ac->param_id, mconfig);
}
return 0;
}
+/*
+ * Fill the dma id for host and link. In the case of a passthrough
+ * pipeline, both host and link are in the same pipeline, so the
+ * link or host dma id is copied based on the dev_type.
+ */
+static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
+ struct skl_pipe_params *params)
+{
+ struct skl_pipe *pipe = mcfg->pipe;
+
+ if (pipe->passthru) {
+ switch (mcfg->dev_type) {
+ case SKL_DEVICE_HDALINK:
+ pipe->p_params->link_dma_id = params->link_dma_id;
+ break;
+
+ case SKL_DEVICE_HDAHOST:
+ pipe->p_params->host_dma_id = params->host_dma_id;
+ break;
+
+ default:
+ break;
+ }
+ pipe->p_params->s_fmt = params->s_fmt;
+ pipe->p_params->ch = params->ch;
+ pipe->p_params->s_freq = params->s_freq;
+ pipe->p_params->stream = params->stream;
+
+ } else {
+ memcpy(pipe->p_params, params, sizeof(*params));
+ }
+}
+
/*
* The FE params are passed by hw_params of the DAI.
* On hw_params, the params are stored in Gateway module of the FE and we
struct skl_module_cfg *mconfig,
struct skl_pipe_params *params)
{
- struct skl_pipe *pipe = mconfig->pipe;
struct skl_module_fmt *format = NULL;
- memcpy(pipe->p_params, params, sizeof(*params));
+ skl_tplg_fill_dma_id(mconfig, params);
if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
format = &mconfig->in_fmt[0];
struct skl_module_cfg *mconfig,
struct skl_pipe_params *params)
{
- struct skl_pipe *pipe = mconfig->pipe;
struct nhlt_specific_cfg *cfg;
struct skl *skl = get_skl_ctx(dai->dev);
int link_type = skl_tplg_be_link_type(mconfig->dev_type);
- memcpy(pipe->p_params, params, sizeof(*params));
+ skl_tplg_fill_dma_id(mconfig, params);
if (link_type == NHLT_LINK_HDA)
return 0;
}
}
+static void skl_clear_pin_config(struct snd_soc_platform *platform,
+ struct snd_soc_dapm_widget *w)
+{
+ int i;
+ struct skl_module_cfg *mconfig;
+ struct skl_pipe *pipe;
+
+ if (!strncmp(w->dapm->component->name, platform->component.name,
+ strlen(platform->component.name))) {
+ mconfig = w->priv;
+ pipe = mconfig->pipe;
+ for (i = 0; i < mconfig->max_in_queue; i++) {
+ mconfig->m_in_pin[i].in_use = false;
+ mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
+ }
+ for (i = 0; i < mconfig->max_out_queue; i++) {
+ mconfig->m_out_pin[i].in_use = false;
+ mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
+ }
+ pipe->state = SKL_PIPE_INVALID;
+ mconfig->m_state = SKL_MODULE_UNINIT;
+ }
+}
+
+void skl_cleanup_resources(struct skl *skl)
+{
+ struct skl_sst *ctx = skl->skl_sst;
+ struct snd_soc_platform *soc_platform = skl->platform;
+ struct snd_soc_dapm_widget *w;
+ struct snd_soc_card *card;
+
+ if (soc_platform == NULL)
+ return;
+
+ card = soc_platform->component.card;
+ if (!card || !card->instantiated)
+ return;
+
+ skl->resource.mem = 0;
+ skl->resource.mcps = 0;
+
+ list_for_each_entry(w, &card->widgets, list) {
+ if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
+ skl_clear_pin_config(soc_platform, w);
+ }
+
+ skl_clear_module_cnt(ctx->dsp);
+}
+
/*
* Topology core widget load callback
*
w->priv = mconfig;
memcpy(&mconfig->guid, &dfw_config->uuid, 16);
+ ret = snd_skl_get_module_info(skl->skl_sst, mconfig->guid, dfw_config);
+ if (ret < 0)
+ return ret;
+
mconfig->id.module_id = dfw_config->module_id;
mconfig->id.instance_id = dfw_config->instance_id;
mconfig->mcps = dfw_config->max_mcps;
ac->max = dfw_ac->max;
ac->param_id = dfw_ac->param_id;
ac->set_params = dfw_ac->set_params;
+ ac->size = dfw_ac->max;
if (ac->max) {
ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
};
+/*
+ * A pipe can have multiple modules, each of them will be a DAPM widget as
+ * well. While managing a pipeline we need to get the list of all the
+ * widgets in a pipeline, so this helper - skl_tplg_create_pipe_widget_list()
+ * helps to get the SKL type widgets in that pipeline
+ */
+static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
+{
+ struct snd_soc_dapm_widget *w;
+ struct skl_module_cfg *mcfg = NULL;
+ struct skl_pipe_module *p_module = NULL;
+ struct skl_pipe *pipe;
+
+ list_for_each_entry(w, &platform->component.card->widgets, list) {
+ if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
+ mcfg = w->priv;
+ pipe = mcfg->pipe;
+
+ p_module = devm_kzalloc(platform->dev,
+ sizeof(*p_module), GFP_KERNEL);
+ if (!p_module)
+ return -ENOMEM;
+
+ p_module->w = w;
+ list_add_tail(&p_module->node, &pipe->w_list);
+ }
+ }
+
+ return 0;
+}
+
+static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
+{
+ struct skl_pipe_module *w_module;
+ struct snd_soc_dapm_widget *w;
+ struct skl_module_cfg *mconfig;
+ bool host_found = false, link_found = false;
+
+ list_for_each_entry(w_module, &pipe->w_list, node) {
+ w = w_module->w;
+ mconfig = w->priv;
+
+ if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
+ host_found = true;
+ else if (mconfig->dev_type != SKL_DEVICE_NONE)
+ link_found = true;
+ }
+
+ if (host_found && link_found)
+ pipe->passthru = true;
+ else
+ pipe->passthru = false;
+}
+
/* This will be read from topology manifest, currently defined here */
#define SKL_MAX_MCPS 30000000
#define SKL_FW_MAX_MEM 1000000
const struct firmware *fw;
struct hdac_bus *bus = ebus_to_hbus(ebus);
struct skl *skl = ebus_to_skl(ebus);
+ struct skl_pipeline *ppl;
ret = request_firmware(&fw, skl->tplg_name, bus->dev);
if (ret < 0) {
skl->resource.max_mem = SKL_FW_MAX_MEM;
skl->tplg = fw;
+ ret = skl_tplg_create_pipe_widget_list(platform);
+ if (ret < 0)
+ return ret;
+
+ list_for_each_entry(ppl, &skl->ppl_list, node)
+ skl_tplg_set_pipe_type(skl, ppl->pipe);
return 0;
}
SKL_PIPE_INVALID = 0,
SKL_PIPE_CREATED = 1,
SKL_PIPE_PAUSED = 2,
- SKL_PIPE_STARTED = 3
+ SKL_PIPE_STARTED = 3,
+ SKL_PIPE_RESET = 4
};
struct skl_pipe_module {
struct skl_pipe_params *p_params;
enum skl_pipe_state state;
struct list_head w_list;
+ bool passthru;
};
enum skl_module_state {
u32 param_id;
u32 set_params;
u32 max;
+ u32 size;
char *params;
};
int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+int skl_reset_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
int skl_init_module(struct skl_sst *ctx, struct skl_module_cfg *module_config);
int skl_bind_modules(struct skl_sst *ctx, struct skl_module_cfg
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
+static struct skl_machine_pdata skl_dmic_data;
+
/*
* initialize the PCI registers
*/
{
struct skl *skl = ebus_to_skl(ebus);
struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct pci_dev *pci = to_pci_dev(bus->dev);
int ret;
snd_hdac_ext_bus_link_power_down_all(ebus);
return ret;
snd_hdac_bus_stop_chip(bus);
+ update_pci_dword(pci, AZX_PCIREG_PGCTL,
+ AZX_PGCTL_LSRMD_MASK, AZX_PGCTL_LSRMD_MASK);
skl_enable_miscbdcge(bus->dev, false);
snd_hdac_bus_enter_link_reset(bus);
skl_enable_miscbdcge(bus->dev, true);
+ skl_cleanup_resources(skl);
return 0;
}
ret = _skl_suspend(ebus);
if (ret < 0)
return ret;
+ skl->skl_sst->fw_loaded = false;
}
if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
platform_device_put(pdev);
return -EIO;
}
+
+ if (mach->pdata)
+ dev_set_drvdata(&pdev->dev, mach->pdata);
+
skl->i2s_dev = pdev;
return 0;
skl->pci_id = pci->device;
+ device_disable_async_suspend(bus->dev);
+
skl->nhlt = skl_nhlt_init(bus->dev);
if (skl->nhlt == NULL)
pci_set_drvdata(skl->pci, ebus);
+ skl_dmic_data.dmic_num = skl_get_dmic_geo(skl);
+
/* check if dsp is there */
if (ebus->ppcap) {
err = skl_machine_device_register(skl,
list_for_each_entry(hlink, &ebus->hlink_list, list)
snd_hdac_ext_bus_link_put(ebus, hlink);
- /*configure PM */
+ /* configure PM */
pm_runtime_put_noidle(bus->dev);
pm_runtime_allow(bus->dev);
struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
struct skl *skl = ebus_to_skl(ebus);
- if (skl->tplg)
- release_firmware(skl->tplg);
+ release_firmware(skl->tplg);
if (pci_dev_run_wake(pci))
pm_runtime_get_noresume(&pci->dev);
static struct sst_acpi_mach sst_skl_devdata[] = {
{ "INT343A", "skl_alc286s_i2s", "intel/dsp_fw_release.bin", NULL, NULL, NULL },
- { "INT343B", "skl_nau88l25_ssm4567_i2s", "intel/dsp_fw_release.bin",
- NULL, NULL, NULL },
- { "MX98357A", "skl_nau88l25_max98357a_i2s", "intel/dsp_fw_release.bin",
- NULL, NULL, NULL },
+ { "INT343B", "skl_n88l25_s4567", "intel/dsp_fw_release.bin",
+ NULL, NULL, &skl_dmic_data },
+ { "MX98357A", "skl_n88l25_m98357a", "intel/dsp_fw_release.bin",
+ NULL, NULL, &skl_dmic_data },
{}
};
static struct sst_acpi_mach sst_bxtp_devdata[] = {
{ "INT343A", "bxt_alc298s_i2s", "intel/dsp_fw_bxtn.bin", NULL, NULL, NULL },
+ { "DLGS7219", "bxt_da7219_max98357a_i2s", "intel/dsp_fw_bxtn.bin", NULL, NULL, NULL },
+ {}
+};
+
+static struct sst_acpi_mach sst_kbl_devdata[] = {
+ { "INT343A", "kbl_alc286s_i2s", "intel/dsp_fw_kbl.bin", NULL, NULL, NULL },
+ { "INT343B", "kbl_n88l25_s4567", "intel/dsp_fw_kbl.bin", NULL, NULL, &skl_dmic_data },
+ { "MX98357A", "kbl_n88l25_m98357a", "intel/dsp_fw_kbl.bin", NULL, NULL, &skl_dmic_data },
+ {}
};
/* PCI IDs */
/* BXT-P */
{ PCI_DEVICE(0x8086, 0x5a98),
.driver_data = (unsigned long)&sst_bxtp_devdata},
+ /* KBL */
+ { PCI_DEVICE(0x8086, 0x9D71),
+ .driver_data = (unsigned long)&sst_kbl_devdata},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, skl_ids);
#define AZX_REG_VS_SDXEFIFOS_XBASE 0x1094
#define AZX_REG_VS_SDXEFIFOS_XINTERVAL 0x20
+#define AZX_PCIREG_PGCTL 0x44
+#define AZX_PGCTL_LSRMD_MASK (1 << 4)
#define AZX_PCIREG_CGCTL 0x48
#define AZX_CGCTL_MISCBDCGE_MASK (1 << 6)
unsigned int init_failed:1; /* delayed init failed */
struct platform_device *dmic_dev;
struct platform_device *i2s_dev;
+ struct snd_soc_platform *platform;
struct nhlt_acpi_table *nhlt; /* nhlt ptr */
struct skl_sst *skl_sst; /* sst skl ctx */
u8 stream_tag;
};
+/* to pass dmic data */
+struct skl_machine_pdata {
+ u32 dmic_num;
+};
+
struct skl_dsp_ops {
int id;
struct skl_dsp_loader_ops (*loader_ops)(void);
struct nhlt_specific_cfg *skl_get_ep_blob(struct skl *skl, u32 instance,
u8 link_type, u8 s_fmt, u8 no_ch, u32 s_rate, u8 dirn);
+int skl_get_dmic_geo(struct skl *skl);
int skl_nhlt_update_topology_bin(struct skl *skl);
int skl_init_dsp(struct skl *skl);
int skl_free_dsp(struct skl *skl);
int skl_suspend_dsp(struct skl *skl);
int skl_resume_dsp(struct skl *skl);
+void skl_cleanup_resources(struct skl *skl);
#endif /* __SOUND_SOC_SKL_H */
config SND_SOC_MEDIATEK
- tristate "ASoC support for Mediatek chip"
+ tristate
+
+config SND_SOC_MT2701
+ tristate "ASoC support for Mediatek MT2701 chip"
+ depends on ARCH_MEDIATEK
+ select SND_SOC_MEDIATEK
+ help
+ This adds ASoC driver for Mediatek MT2701 boards
+ that can be used with other codecs.
+ Select Y if you have such device.
+ If unsure select "N".
+
+config SND_SOC_MT2701_CS42448
+ tristate "ASoC Audio driver for MT2701 with CS42448 codec"
+ depends on SND_SOC_MT2701
+ select SND_SOC_CS42XX8_I2C
+ select SND_SOC_BT_SCO
+ help
+ This adds ASoC driver for Mediatek MT2701 boards
+ with the CS42448 codecs.
+ Select Y if you have such device.
+ If unsure select "N".
+
+config SND_SOC_MT8173
+ tristate "ASoC support for Mediatek MT8173 chip"
depends on ARCH_MEDIATEK
+ select SND_SOC_MEDIATEK
help
- This adds ASoC platform driver support for Mediatek chip
+ This adds ASoC platform driver support for Mediatek MT8173 chip
that can be used with other codecs.
Select Y if you have such device.
Ex: MT8173
config SND_SOC_MT8173_MAX98090
tristate "ASoC Audio driver for MT8173 with MAX98090 codec"
- depends on SND_SOC_MEDIATEK && I2C
+ depends on SND_SOC_MT8173 && I2C
select SND_SOC_MAX98090
help
This adds ASoC driver for Mediatek MT8173 boards
config SND_SOC_MT8173_RT5650
tristate "ASoC Audio driver for MT8173 with RT5650 codec"
- depends on SND_SOC_MEDIATEK && I2C
+ depends on SND_SOC_MT8173 && I2C
select SND_SOC_RT5645
+ select SND_SOC_HDMI_CODEC
help
This adds ASoC driver for Mediatek MT8173 boards
with the RT5650 audio codec.
config SND_SOC_MT8173_RT5650_RT5514
tristate "ASoC Audio driver for MT8173 with RT5650 RT5514 codecs"
- depends on SND_SOC_MEDIATEK && I2C
+ depends on SND_SOC_MT8173 && I2C
select SND_SOC_RT5645
select SND_SOC_RT5514
help
config SND_SOC_MT8173_RT5650_RT5676
tristate "ASoC Audio driver for MT8173 with RT5650 RT5676 codecs"
- depends on SND_SOC_MEDIATEK && I2C
+ depends on SND_SOC_MT8173 && I2C
select SND_SOC_RT5645
select SND_SOC_RT5677
select SND_SOC_HDMI_CODEC
-# MTK Platform Support
-obj-$(CONFIG_SND_SOC_MEDIATEK) += mtk-afe-pcm.o
-# Machine support
-obj-$(CONFIG_SND_SOC_MT8173_MAX98090) += mt8173-max98090.o
-obj-$(CONFIG_SND_SOC_MT8173_RT5650) += mt8173-rt5650.o
-obj-$(CONFIG_SND_SOC_MT8173_RT5650_RT5514) += mt8173-rt5650-rt5514.o
-obj-$(CONFIG_SND_SOC_MT8173_RT5650_RT5676) += mt8173-rt5650-rt5676.o
+obj-$(CONFIG_SND_SOC_MEDIATEK) += common/
+obj-$(CONFIG_SND_SOC_MT2701) += mt2701/
+obj-$(CONFIG_SND_SOC_MT8173) += mt8173/
--- /dev/null
+#
+# Copyright (C) 2015 MediaTek Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+
+# platform driver
+snd-soc-mtk-common-objs := mtk-afe-platform-driver.o mtk-afe-fe-dai.o
+obj-$(CONFIG_SND_SOC_MEDIATEK) += snd-soc-mtk-common.o
--- /dev/null
+/*
+ * mtk-afe-fe-dai.c -- Mediatek afe fe dai operator
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include "mtk-afe-fe-dai.h"
+#include "mtk-base-afe.h"
+
+#define AFE_BASE_END_OFFSET 8
+
+int mtk_regmap_update_bits(struct regmap *map, int reg, unsigned int mask,
+ unsigned int val)
+{
+ if (reg < 0)
+ return 0;
+ return regmap_update_bits(map, reg, mask, val);
+}
+
+int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
+{
+ if (reg < 0)
+ return 0;
+ return regmap_write(map, reg, val);
+}
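
The negative-register convention above lets a memif description simply omit controls it does not have; a hypothetical usage sketch (the field values are made up for illustration):

/* Illustrative only: a memif without a mono control sets mono_reg = -1,
 * so the second update below becomes a silent no-op returning 0. */
static const struct mtk_base_memif_data example_memif = {
	.name		= "DL_EXAMPLE",
	.enable_reg	= 0x10,		/* made-up offset */
	.enable_shift	= 0,
	.mono_reg	= -1,		/* control not present */
};

static void example_enable(struct mtk_base_afe *afe)
{
	mtk_regmap_update_bits(afe->regmap, example_memif.enable_reg,
			       1 << example_memif.enable_shift,
			       1 << example_memif.enable_shift);
	/* skipped silently because mono_reg is negative */
	mtk_regmap_update_bits(afe->regmap, example_memif.mono_reg, 1, 0);
}
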
+
+int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int memif_num = rtd->cpu_dai->id;
+ struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
+ const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
+ int ret;
+
+ memif->substream = substream;
+
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
+ /* enable agent */
+ mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
+ 1 << memif->data->agent_disable_shift,
+ 0 << memif->data->agent_disable_shift);
+
+ snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);
+
+ /*
+ * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
+ * smaller than period_size due to AFE's internal buffer.
+ * This easily leads to overrun when avail_min is period_size.
+ * One more period can hold the possible unread buffer.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ int periods_max = mtk_afe_hardware->periods_max;
+
+ ret = snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS,
+ 3, periods_max);
+ if (ret < 0) {
+ dev_err(afe->dev, "hw_constraint_minmax failed\n");
+ return ret;
+ }
+ }
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");
+
+ /* dynamic allocate irq to memif */
+ if (memif->irq_usage < 0) {
+ int irq_id = mtk_dynamic_irq_acquire(afe);
+
+ if (irq_id != afe->irqs_size) {
+ /* link */
+ memif->irq_usage = irq_id;
+ } else {
+ dev_err(afe->dev, "%s() error: no more asys irq\n",
+ __func__);
+ ret = -EBUSY;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_startup);
+
+void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ int irq_id;
+
+ irq_id = memif->irq_usage;
+
+ mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
+ 1 << memif->data->agent_disable_shift,
+ 1 << memif->data->agent_disable_shift);
+
+ if (!memif->const_irq) {
+ mtk_dynamic_irq_release(afe, irq_id);
+ memif->irq_usage = -1;
+ memif->substream = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_shutdown);
+
+int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ int msb_at_bit33 = 0;
+ int ret, fs = 0;
+
+ ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+ if (ret < 0)
+ return ret;
+
+ msb_at_bit33 = upper_32_bits(substream->runtime->dma_addr) ? 1 : 0;
+ memif->phys_buf_addr = lower_32_bits(substream->runtime->dma_addr);
+ memif->buffer_size = substream->runtime->dma_bytes;
+
+ /* start */
+ mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
+ memif->phys_buf_addr);
+ /* end */
+ mtk_regmap_write(afe->regmap,
+ memif->data->reg_ofs_base + AFE_BASE_END_OFFSET,
+ memif->phys_buf_addr + memif->buffer_size - 1);
+
+ /* set MSB to 33-bit */
+ mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
+ 1 << memif->data->msb_shift,
+ msb_at_bit33 << memif->data->msb_shift);
+
+ /* set channel */
+ if (memif->data->mono_shift >= 0) {
+ unsigned int mono = (params_channels(params) == 1) ? 1 : 0;
+
+ mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
+ 1 << memif->data->mono_shift,
+ mono << memif->data->mono_shift);
+ }
+
+ /* set rate */
+ if (memif->data->fs_shift < 0)
+ return 0;
+
+ fs = afe->memif_fs(substream, params_rate(params));
+
+ if (fs < 0)
+ return -EINVAL;
+
+ mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
+ memif->data->fs_maskbit << memif->data->fs_shift,
+ fs << memif->data->fs_shift);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_params);
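
The base/end pair programmed above uses an inclusive end address at reg_ofs_base + AFE_BASE_END_OFFSET; a standalone check of that arithmetic with a hypothetical buffer:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t base = 0x80000000u, size = 0x8000;	/* hypothetical DMA buffer */
	uint32_t end = base + size - 1;			/* inclusive end, as written above */

	assert(end == 0x80007fffu);
	assert(end - base + 1 == size);
	return 0;
}
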
+
+int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_free);
+
+int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime * const runtime = substream->runtime;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
+ const struct mtk_base_irq_data *irq_data = irqs->irq_data;
+ unsigned int counter = runtime->period_size;
+ int fs;
+
+ dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ if (memif->data->enable_shift >= 0)
+ mtk_regmap_update_bits(afe->regmap,
+ memif->data->enable_reg,
+ 1 << memif->data->enable_shift,
+ 1 << memif->data->enable_shift);
+
+ /* set irq counter */
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
+ irq_data->irq_cnt_maskbit
+ << irq_data->irq_cnt_shift,
+ counter << irq_data->irq_cnt_shift);
+
+ /* set irq fs */
+ fs = afe->irq_fs(substream, runtime->rate);
+
+ if (fs < 0)
+ return -EINVAL;
+
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
+ irq_data->irq_fs_maskbit
+ << irq_data->irq_fs_shift,
+ fs << irq_data->irq_fs_shift);
+
+ /* enable interrupt */
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
+ 1 << irq_data->irq_en_shift,
+ 1 << irq_data->irq_en_shift);
+
+ return 0;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
+ 1 << memif->data->enable_shift, 0);
+ /* disable interrupt */
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
+ 1 << irq_data->irq_en_shift,
+ 0 << irq_data->irq_en_shift);
+ /* and clear pending IRQ */
+ mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
+ 1 << irq_data->irq_clr_shift);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_trigger);
+
+int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ int hd_audio = 0;
+
+ /* set hd mode */
+ switch (substream->runtime->format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ hd_audio = 0;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ hd_audio = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ hd_audio = 1;
+ break;
+ default:
+ dev_err(afe->dev, "%s() error: unsupported format %d\n",
+ __func__, substream->runtime->format);
+ break;
+ }
+
+ mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
+ 1 << memif->data->hd_shift,
+ hd_audio << memif->data->hd_shift);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_prepare);
+
+const struct snd_soc_dai_ops mtk_afe_fe_ops = {
+ .startup = mtk_afe_fe_startup,
+ .shutdown = mtk_afe_fe_shutdown,
+ .hw_params = mtk_afe_fe_hw_params,
+ .hw_free = mtk_afe_fe_hw_free,
+ .prepare = mtk_afe_fe_prepare,
+ .trigger = mtk_afe_fe_trigger,
+};
+EXPORT_SYMBOL_GPL(mtk_afe_fe_ops);
+
+int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
+{
+ int i;
+
+ mutex_lock(&afe->irq_alloc_lock);
+ for (i = 0; i < afe->irqs_size; ++i) {
+ if (afe->irqs[i].irq_occupyed == 0) {
+ afe->irqs[i].irq_occupyed = 1;
+ mutex_unlock(&afe->irq_alloc_lock);
+ return i;
+ }
+ }
+ mutex_unlock(&afe->irq_alloc_lock);
+ return afe->irqs_size;
+}
+EXPORT_SYMBOL_GPL(mtk_dynamic_irq_acquire);
+
+int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
+{
+ mutex_lock(&afe->irq_alloc_lock);
+ if (irq_id >= 0 && irq_id < afe->irqs_size) {
+ afe->irqs[irq_id].irq_occupyed = 0;
+ mutex_unlock(&afe->irq_alloc_lock);
+ return 0;
+ }
+ mutex_unlock(&afe->irq_alloc_lock);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mtk_dynamic_irq_release);
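
A hypothetical caller sketch for the pair above (not part of the patch); note that acquire returns irqs_size, not a negative errno, when the pool is exhausted:

static int example_attach_irq(struct mtk_base_afe *afe,
			      struct mtk_base_afe_memif *memif)
{
	int irq_id = mtk_dynamic_irq_acquire(afe);

	if (irq_id == afe->irqs_size)
		return -EBUSY;		/* pool exhausted */

	memif->irq_usage = irq_id;
	return 0;
}

static void example_detach_irq(struct mtk_base_afe *afe,
			       struct mtk_base_afe_memif *memif)
{
	mtk_dynamic_irq_release(afe, memif->irq_usage);
	memif->irq_usage = -1;
}
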
+
+int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+ struct device *dev = afe->dev;
+ struct regmap *regmap = afe->regmap;
+ int i;
+
+ if (pm_runtime_status_suspended(dev) || afe->suspended)
+ return 0;
+
+ if (!afe->reg_back_up)
+ afe->reg_back_up =
+ devm_kcalloc(dev, afe->reg_back_up_list_num,
+ sizeof(unsigned int), GFP_KERNEL);
+
+ if (afe->reg_back_up)
+ for (i = 0; i < afe->reg_back_up_list_num; i++)
+ regmap_read(regmap, afe->reg_back_up_list[i],
+ &afe->reg_back_up[i]);
+
+ afe->suspended = true;
+ afe->runtime_suspend(dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_dai_suspend);
+
+int mtk_afe_dai_resume(struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+ struct device *dev = afe->dev;
+ struct regmap *regmap = afe->regmap;
+ int i = 0;
+
+ if (pm_runtime_status_suspended(dev) || !afe->suspended)
+ return 0;
+
+ afe->runtime_resume(dev);
+
+ if (!afe->reg_back_up) {
+ dev_dbg(dev, "%s no reg_backup\n", __func__);
+ } else {
+ for (i = 0; i < afe->reg_back_up_list_num; i++)
+ mtk_regmap_write(regmap, afe->reg_back_up_list[i],
+ afe->reg_back_up[i]);
+ }
+
+ afe->suspended = false;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_dai_resume);
+
+MODULE_DESCRIPTION("Mediatek simple fe dai operator");
+MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+
--- /dev/null
+/*
+ * mtk-afe-fe-dai.h -- Mediatek afe fe dai operator definition
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_AFE_FE_DAI_H_
+#define _MTK_AFE_FE_DAI_H_
+
+struct snd_soc_dai_ops;
+struct mtk_base_afe;
+struct mtk_base_afe_memif;
+
+int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai);
+int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai);
+
+extern const struct snd_soc_dai_ops mtk_afe_fe_ops;
+
+int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe);
+int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id);
+int mtk_afe_dai_suspend(struct snd_soc_dai *dai);
+int mtk_afe_dai_resume(struct snd_soc_dai *dai);
+
+#endif
--- /dev/null
+/*
+ * mtk-afe-platform-driver.c -- Mediatek afe platform driver
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <sound/soc.h>
+
+#include "mtk-afe-platform-driver.h"
+#include "mtk-base-afe.h"
+
+static snd_pcm_uframes_t mtk_afe_pcm_pointer
+ (struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ const struct mtk_base_memif_data *memif_data = memif->data;
+ struct regmap *regmap = afe->regmap;
+ struct device *dev = afe->dev;
+ int reg_ofs_base = memif_data->reg_ofs_base;
+ int reg_ofs_cur = memif_data->reg_ofs_cur;
+ unsigned int hw_ptr = 0, hw_base = 0;
+ int ret, pcm_ptr_bytes;
+
+ ret = regmap_read(regmap, reg_ofs_cur, &hw_ptr);
+ if (ret || hw_ptr == 0) {
+ dev_err(dev, "%s hw_ptr err\n", __func__);
+ pcm_ptr_bytes = 0;
+ goto POINTER_RETURN_FRAMES;
+ }
+
+ ret = regmap_read(regmap, reg_ofs_base, &hw_base);
+ if (ret || hw_base == 0) {
+ dev_err(dev, "%s hw_base err\n", __func__);
+ pcm_ptr_bytes = 0;
+ goto POINTER_RETURN_FRAMES;
+ }
+
+ pcm_ptr_bytes = hw_ptr - hw_base;
+
+POINTER_RETURN_FRAMES:
+ return bytes_to_frames(substream->runtime, pcm_ptr_bytes);
+}
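
The pointer math above is a plain byte offset (current minus base) converted to frames; a standalone check with hypothetical register values and a 4-byte frame (S16_LE stereo):

#include <assert.h>

int main(void)
{
	unsigned int hw_base = 0x10000, hw_ptr = 0x10100;	/* hypothetical */
	int pcm_ptr_bytes = hw_ptr - hw_base;

	assert(pcm_ptr_bytes == 256);
	assert(pcm_ptr_bytes / 4 == 64);	/* 4 bytes/frame -> 64 frames */
	return 0;
}
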
+
+static const struct snd_pcm_ops mtk_afe_pcm_ops = {
+ .ioctl = snd_pcm_lib_ioctl,
+ .pointer = mtk_afe_pcm_pointer,
+};
+
+static int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ size_t size;
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_pcm *pcm = rtd->pcm;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+
+ size = afe->mtk_afe_hardware->buffer_bytes_max;
+ return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ card->dev, size, size);
+}
+
+static void mtk_afe_pcm_free(struct snd_pcm *pcm)
+{
+ snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+const struct snd_soc_platform_driver mtk_afe_pcm_platform = {
+ .ops = &mtk_afe_pcm_ops,
+ .pcm_new = mtk_afe_pcm_new,
+ .pcm_free = mtk_afe_pcm_free,
+};
+EXPORT_SYMBOL_GPL(mtk_afe_pcm_platform);
+
+MODULE_DESCRIPTION("Mediatek simple platform driver");
+MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/*
+ * mtk-afe-platform-driver.h -- MediaTek AFE platform driver definition
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_AFE_PLATFORM_DRIVER_H_
+#define _MTK_AFE_PLATFORM_DRIVER_H_
+
+extern const struct snd_soc_platform_driver mtk_afe_pcm_platform;
+
+#endif
--- /dev/null
+/*
+ * mtk-base-afe.h -- MediaTek base AFE structure
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_BASE_AFE_H_
+#define _MTK_BASE_AFE_H_
+
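+/*
+ * Register layout of one memory interface (memif): buffer base/current
+ * pointers plus the fs/mono/enable/HD/MSB/agent control bits. A value
+ * of -1 marks a control the SoC does not expose.
+ */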
+struct mtk_base_memif_data {
+ int id;
+ const char *name;
+ int reg_ofs_base;
+ int reg_ofs_cur;
+ int fs_reg;
+ int fs_shift;
+ int fs_maskbit;
+ int mono_reg;
+ int mono_shift;
+ int enable_reg;
+ int enable_shift;
+ int hd_reg;
+ int hd_shift;
+ int msb_reg;
+ int msb_shift;
+ int agent_disable_reg;
+ int agent_disable_shift;
+};
+
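+/* Register layout of one AFE IRQ: counter, fs, enable and clear bits. */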
+struct mtk_base_irq_data {
+ int id;
+ int irq_cnt_reg;
+ int irq_cnt_shift;
+ int irq_cnt_maskbit;
+ int irq_fs_reg;
+ int irq_fs_shift;
+ int irq_fs_maskbit;
+ int irq_en_reg;
+ int irq_en_shift;
+ int irq_clr_reg;
+ int irq_clr_shift;
+};
+
+struct device;
+struct mtk_base_afe_memif;
+struct mtk_base_afe_irq;
+struct regmap;
+struct snd_pcm_substream;
+struct snd_soc_dai;
+
+struct mtk_base_afe {
+ void __iomem *base_addr;
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex irq_alloc_lock; /* dynamic alloc irq lock */
+
+ unsigned int const *reg_back_up_list;
+ unsigned int *reg_back_up;
+ unsigned int reg_back_up_list_num;
+
+ int (*runtime_suspend)(struct device *dev);
+ int (*runtime_resume)(struct device *dev);
+ bool suspended;
+
+ struct mtk_base_afe_memif *memif;
+ int memif_size;
+ struct mtk_base_afe_irq *irqs;
+ int irqs_size;
+
+ const struct snd_pcm_hardware *mtk_afe_hardware;
+ int (*memif_fs)(struct snd_pcm_substream *substream,
+ unsigned int rate);
+ int (*irq_fs)(struct snd_pcm_substream *substream,
+ unsigned int rate);
+
+ void *platform_priv;
+};
+
+struct mtk_base_afe_memif {
+ unsigned int phys_buf_addr;
+ int buffer_size;
+ struct snd_pcm_substream *substream;
+ const struct mtk_base_memif_data *data;
+ int irq_usage;
+ int const_irq;
+};
+
+struct mtk_base_afe_irq {
+ const struct mtk_base_irq_data *irq_data;
+ int irq_occupyed;
+};
+
+#endif
--- /dev/null
+#
+# Copyright (C) 2015 MediaTek Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+
+# platform driver
+snd-soc-mt2701-afe-objs := mt2701-afe-pcm.o mt2701-afe-clock-ctrl.o
+obj-$(CONFIG_SND_SOC_MT2701) += snd-soc-mt2701-afe.o
+
+# machine driver
+obj-$(CONFIG_SND_SOC_MT2701_CS42448) += mt2701-cs42448.o
--- /dev/null
+/*
+ * mt2701-afe-clock-ctrl.c -- MediaTek MT2701 AFE clock control
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <sound/soc.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include "mt2701-afe-common.h"
+#include "mt2701-afe-clock-ctrl.h"
+
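+/* Clock names, indexed by enum audio_system_clock_type. */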
+static const char *aud_clks[MT2701_CLOCK_NUM] = {
+ [MT2701_AUD_INFRA_SYS_AUDIO] = "infra_sys_audio_clk",
+ [MT2701_AUD_AUD_MUX1_SEL] = "top_audio_mux1_sel",
+ [MT2701_AUD_AUD_MUX2_SEL] = "top_audio_mux2_sel",
+ [MT2701_AUD_AUD_MUX1_DIV] = "top_audio_mux1_div",
+ [MT2701_AUD_AUD_MUX2_DIV] = "top_audio_mux2_div",
+ [MT2701_AUD_AUD_48K_TIMING] = "top_audio_48k_timing",
+ [MT2701_AUD_AUD_44K_TIMING] = "top_audio_44k_timing",
+ [MT2701_AUD_AUDPLL_MUX_SEL] = "top_audpll_mux_sel",
+ [MT2701_AUD_APLL_SEL] = "top_apll_sel",
+ [MT2701_AUD_AUD1PLL_98M] = "top_aud1_pll_98M",
+ [MT2701_AUD_AUD2PLL_90M] = "top_aud2_pll_90M",
+ [MT2701_AUD_HADDS2PLL_98M] = "top_hadds2_pll_98M",
+ [MT2701_AUD_HADDS2PLL_294M] = "top_hadds2_pll_294M",
+ [MT2701_AUD_AUDPLL] = "top_audpll",
+ [MT2701_AUD_AUDPLL_D4] = "top_audpll_d4",
+ [MT2701_AUD_AUDPLL_D8] = "top_audpll_d8",
+ [MT2701_AUD_AUDPLL_D16] = "top_audpll_d16",
+ [MT2701_AUD_AUDPLL_D24] = "top_audpll_d24",
+ [MT2701_AUD_AUDINTBUS] = "top_audintbus_sel",
+ [MT2701_AUD_CLK_26M] = "clk_26m",
+ [MT2701_AUD_SYSPLL1_D4] = "top_syspll1_d4",
+ [MT2701_AUD_AUD_K1_SRC_SEL] = "top_aud_k1_src_sel",
+ [MT2701_AUD_AUD_K2_SRC_SEL] = "top_aud_k2_src_sel",
+ [MT2701_AUD_AUD_K3_SRC_SEL] = "top_aud_k3_src_sel",
+ [MT2701_AUD_AUD_K4_SRC_SEL] = "top_aud_k4_src_sel",
+ [MT2701_AUD_AUD_K5_SRC_SEL] = "top_aud_k5_src_sel",
+ [MT2701_AUD_AUD_K6_SRC_SEL] = "top_aud_k6_src_sel",
+ [MT2701_AUD_AUD_K1_SRC_DIV] = "top_aud_k1_src_div",
+ [MT2701_AUD_AUD_K2_SRC_DIV] = "top_aud_k2_src_div",
+ [MT2701_AUD_AUD_K3_SRC_DIV] = "top_aud_k3_src_div",
+ [MT2701_AUD_AUD_K4_SRC_DIV] = "top_aud_k4_src_div",
+ [MT2701_AUD_AUD_K5_SRC_DIV] = "top_aud_k5_src_div",
+ [MT2701_AUD_AUD_K6_SRC_DIV] = "top_aud_k6_src_div",
+ [MT2701_AUD_AUD_I2S1_MCLK] = "top_aud_i2s1_mclk",
+ [MT2701_AUD_AUD_I2S2_MCLK] = "top_aud_i2s2_mclk",
+ [MT2701_AUD_AUD_I2S3_MCLK] = "top_aud_i2s3_mclk",
+ [MT2701_AUD_AUD_I2S4_MCLK] = "top_aud_i2s4_mclk",
+ [MT2701_AUD_AUD_I2S5_MCLK] = "top_aud_i2s5_mclk",
+ [MT2701_AUD_AUD_I2S6_MCLK] = "top_aud_i2s6_mclk",
+ [MT2701_AUD_ASM_M_SEL] = "top_asm_m_sel",
+ [MT2701_AUD_ASM_H_SEL] = "top_asm_h_sel",
+ [MT2701_AUD_UNIVPLL2_D4] = "top_univpll2_d4",
+ [MT2701_AUD_UNIVPLL2_D2] = "top_univpll2_d2",
+ [MT2701_AUD_SYSPLL_D5] = "top_syspll_d5",
+};
+
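+/* Look up all clocks up front; every entry in aud_clks[] must exist. */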
+int mt2701_init_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i = 0;
+
+ for (i = 0; i < MT2701_CLOCK_NUM; i++) {
+ afe_priv->clocks[i] = devm_clk_get(afe->dev, aud_clks[i]);
+ if (IS_ERR(afe_priv->clocks[i])) {
+ dev_warn(afe->dev, "%s devm_clk_get %s fail\n",
+ __func__, aud_clks[i]);
+ return PTR_ERR(afe_priv->clocks[i]);
+ }
+ }
+
+ return 0;
+}
+
+int mt2701_afe_enable_clock(struct mtk_base_afe *afe)
+{
+ int ret = 0;
+
+ ret = mt2701_turn_on_a1sys_clock(afe);
+ if (ret) {
+ dev_err(afe->dev, "%s turn_on_a1sys_clock fail %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = mt2701_turn_on_a2sys_clock(afe);
+ if (ret) {
+ dev_err(afe->dev, "%s turn_on_a2sys_clock fail %d\n",
+ __func__, ret);
+ mt2701_turn_off_a1sys_clock(afe);
+ return ret;
+ }
+
+ ret = mt2701_turn_on_afe_clock(afe);
+ if (ret) {
+ dev_err(afe->dev, "%s turn_on_afe_clock fail %d\n",
+ __func__, ret);
+ mt2701_turn_off_a1sys_clock(afe);
+ mt2701_turn_off_a2sys_clock(afe);
+ return ret;
+ }
+
+ regmap_update_bits(afe->regmap, ASYS_TOP_CON,
+ AUDIO_TOP_CON0_A1SYS_A2SYS_ON,
+ AUDIO_TOP_CON0_A1SYS_A2SYS_ON);
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+ AFE_DAC_CON0_AFE_ON,
+ AFE_DAC_CON0_AFE_ON);
+ regmap_write(afe->regmap, PWR2_TOP_CON,
+ PWR2_TOP_CON_INIT_VAL);
+ regmap_write(afe->regmap, PWR1_ASM_CON1,
+ PWR1_ASM_CON1_INIT_VAL);
+ regmap_write(afe->regmap, PWR2_ASM_CON1,
+ PWR2_ASM_CON1_INIT_VAL);
+
+ return 0;
+}
+
+void mt2701_afe_disable_clock(struct mtk_base_afe *afe)
+{
+ mt2701_turn_off_afe_clock(afe);
+ mt2701_turn_off_a1sys_clock(afe);
+ mt2701_turn_off_a2sys_clock(afe);
+ regmap_update_bits(afe->regmap, ASYS_TOP_CON,
+ AUDIO_TOP_CON0_A1SYS_A2SYS_ON, 0);
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+ AFE_DAC_CON0_AFE_ON, 0);
+}
+
+int mt2701_turn_on_a1sys_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int ret = 0;
+
+ /* Set Mux */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
+ return ret;
+ }
+
+ ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL],
+ afe_priv->clocks[MT2701_AUD_AUD1PLL_98M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
+ aud_clks[MT2701_AUD_AUD_MUX1_SEL],
+ aud_clks[MT2701_AUD_AUD1PLL_98M], ret);
+ goto A1SYS_CLK_AUD_MUX1_SEL_ERR;
+ }
+
+ /* Set Divider */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__,
+ aud_clks[MT2701_AUD_AUD_MUX1_DIV],
+ ret);
+ goto A1SYS_CLK_AUD_MUX1_SEL_ERR;
+ }
+
+ ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV],
+ MT2701_AUD_AUD_MUX1_DIV_RATE);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n", __func__,
+ aud_clks[MT2701_AUD_AUD_MUX1_DIV],
+ MT2701_AUD_AUD_MUX1_DIV_RATE, ret);
+ goto A1SYS_CLK_AUD_MUX1_DIV_ERR;
+ }
+
+ /* Enable clock gate */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_AUD_48K_TIMING], ret);
+ goto A1SYS_CLK_AUD_MUX1_DIV_ERR;
+ }
+
+ /* Enable infra audio */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
+ goto A1SYS_CLK_AUD_48K_ERR;
+ }
+
+ return 0;
+
+A1SYS_CLK_AUD_48K_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
+A1SYS_CLK_AUD_MUX1_DIV_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
+A1SYS_CLK_AUD_MUX1_SEL_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+
+ return ret;
+}
+
+void mt2701_turn_off_a1sys_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+}
+
+int mt2701_turn_on_a2sys_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int ret = 0;
+
+ /* Set Mux */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
+ return ret;
+ }
+
+ ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL],
+ afe_priv->clocks[MT2701_AUD_AUD2PLL_90M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
+ aud_clks[MT2701_AUD_AUD_MUX2_SEL],
+ aud_clks[MT2701_AUD_AUD2PLL_90M], ret);
+ goto A2SYS_CLK_AUD_MUX2_SEL_ERR;
+ }
+
+ /* Set Divider */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_AUD_MUX2_DIV], ret);
+ goto A2SYS_CLK_AUD_MUX2_DIV_ERR;
+ }
+
+ ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV],
+ MT2701_AUD_AUD_MUX2_DIV_RATE);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n", __func__,
+ aud_clks[MT2701_AUD_AUD_MUX2_DIV],
+ MT2701_AUD_AUD_MUX2_DIV_RATE, ret);
+ goto A2SYS_CLK_AUD_MUX2_DIV_ERR;
+ }
+
+ /* Enable clock gate */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_AUD_44K_TIMING], ret);
+ goto A2SYS_CLK_AUD_MUX2_DIV_ERR;
+ }
+
+ /* Enable infra audio */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
+ goto A2SYS_CLK_AUD_44K_ERR;
+ }
+
+ return 0;
+
+A2SYS_CLK_AUD_44K_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
+A2SYS_CLK_AUD_MUX2_DIV_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
+A2SYS_CLK_AUD_MUX2_SEL_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+
+ return ret;
+}
+
+void mt2701_turn_off_a2sys_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+}
+
+int mt2701_turn_on_afe_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ /* enable INFRA_SYS */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
+ return ret;
+ }
+
+ /* Set MT2701_AUD_AUDINTBUS to MT2701_AUD_SYSPLL1_D4 */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_AUDINTBUS], ret);
+ goto AFE_AUD_INFRA_ERR;
+ }
+
+ ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUDINTBUS],
+ afe_priv->clocks[MT2701_AUD_SYSPLL1_D4]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
+ aud_clks[MT2701_AUD_AUDINTBUS],
+ aud_clks[MT2701_AUD_SYSPLL1_D4], ret);
+ goto AFE_AUD_AUDINTBUS_ERR;
+ }
+
+ /* Set MT2701_AUD_ASM_H_SEL to MT2701_AUD_UNIVPLL2_D2 */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_ASM_H_SEL], ret);
+ goto AFE_AUD_AUDINTBUS_ERR;
+ }
+
+ ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_H_SEL],
+ afe_priv->clocks[MT2701_AUD_UNIVPLL2_D2]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
+ aud_clks[MT2701_AUD_ASM_H_SEL],
+ aud_clks[MT2701_AUD_UNIVPLL2_D2], ret);
+ goto AFE_AUD_ASM_H_ERR;
+ }
+
+ /* Set MT2701_AUD_ASM_M_SEL to MT2701_AUD_UNIVPLL2_D4 */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_ASM_M_SEL], ret);
+ goto AFE_AUD_ASM_H_ERR;
+ }
+
+ ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_M_SEL],
+ afe_priv->clocks[MT2701_AUD_UNIVPLL2_D4]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
+ aud_clks[MT2701_AUD_ASM_M_SEL],
+ aud_clks[MT2701_AUD_UNIVPLL2_D4], ret);
+ goto AFE_AUD_ASM_M_ERR;
+ }
+
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
+ AUDIO_TOP_CON0_PDN_AFE, 0);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
+ AUDIO_TOP_CON0_PDN_APLL_CK, 0);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_A1SYS, 0);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_A2SYS, 0);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_AFE_CONN, 0);
+
+ return 0;
+
+AFE_AUD_ASM_M_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
+AFE_AUD_ASM_H_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
+AFE_AUD_AUDINTBUS_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
+AFE_AUD_INFRA_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+
+ return ret;
+}
+
+void mt2701_turn_off_afe_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
+
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
+ AUDIO_TOP_CON0_PDN_AFE, AUDIO_TOP_CON0_PDN_AFE);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
+ AUDIO_TOP_CON0_PDN_APLL_CK,
+ AUDIO_TOP_CON0_PDN_APLL_CK);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_A1SYS,
+ AUDIO_TOP_CON4_PDN_A1SYS);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_A2SYS,
+ AUDIO_TOP_CON4_PDN_A2SYS);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_AFE_CONN,
+ AUDIO_TOP_CON4_PDN_AFE_CONN);
+}
+
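+/*
+ * Parent the Kx_SRC mux of I2S "id" to the selected PLL domain and set
+ * the Kx_SRC divider to the requested MCLK rate. Each step is bracketed
+ * by a temporary prepare_enable/disable_unprepare, presumably so the
+ * mux/divider update takes effect while the clock is running.
+ */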
+void mt2701_mclk_configuration(struct mtk_base_afe *afe, int id, int domain,
+ int mclk)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+ int aud_src_div_id = MT2701_AUD_AUD_K1_SRC_DIV + id;
+ int aud_src_clk_id = MT2701_AUD_AUD_K1_SRC_SEL + id;
+
+ /* Set MCLK Kx_SRC_SEL(domain) */
+ ret = clk_prepare_enable(afe_priv->clocks[aud_src_clk_id]);
+ if (ret)
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[aud_src_clk_id], ret);
+
+ if (domain == 0) {
+ ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
+ afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+ if (ret)
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[aud_src_clk_id],
+ aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
+ } else {
+ ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
+ afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+ if (ret)
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[aud_src_clk_id],
+ aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
+ }
+ clk_disable_unprepare(afe_priv->clocks[aud_src_clk_id]);
+
+ /* Set MCLK Kx_SRC_DIV(divider) */
+ ret = clk_prepare_enable(afe_priv->clocks[aud_src_div_id]);
+ if (ret)
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[aud_src_div_id], ret);
+
+ ret = clk_set_rate(afe_priv->clocks[aud_src_div_id], mclk);
+ if (ret)
+ dev_err(afe->dev, "%s clk_set_rate %s-%d fail %d\n", __func__,
+ aud_clks[aud_src_div_id], mclk, ret);
+ clk_disable_unprepare(afe_priv->clocks[aud_src_div_id]);
+}
+
+MODULE_DESCRIPTION("MT2701 afe clock control");
+MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/*
+ * mt2701-afe-clock-ctrl.h -- MediaTek MT2701 AFE clock control definitions
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT2701_AFE_CLOCK_CTRL_H_
+#define _MT2701_AFE_CLOCK_CTRL_H_
+
+struct mtk_base_afe;
+
+int mt2701_init_clock(struct mtk_base_afe *afe);
+int mt2701_afe_enable_clock(struct mtk_base_afe *afe);
+void mt2701_afe_disable_clock(struct mtk_base_afe *afe);
+
+int mt2701_turn_on_a1sys_clock(struct mtk_base_afe *afe);
+void mt2701_turn_off_a1sys_clock(struct mtk_base_afe *afe);
+
+int mt2701_turn_on_a2sys_clock(struct mtk_base_afe *afe);
+void mt2701_turn_off_a2sys_clock(struct mtk_base_afe *afe);
+
+int mt2701_turn_on_afe_clock(struct mtk_base_afe *afe);
+void mt2701_turn_off_afe_clock(struct mtk_base_afe *afe);
+
+void mt2701_mclk_configuration(struct mtk_base_afe *afe, int id, int domain,
+ int mclk);
+
+#endif
--- /dev/null
+/*
+ * mt2701-afe-common.h -- MediaTek MT2701 audio driver definitions
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT_2701_AFE_COMMON_H_
+#define _MT_2701_AFE_COMMON_H_
+#include <sound/soc.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include "mt2701-reg.h"
+#include "../common/mtk-base-afe.h"
+
+#define MT2701_STREAM_DIR_NUM (SNDRV_PCM_STREAM_LAST + 1)
+#define MT2701_PLL_DOMAIN_0_RATE 98304000
+#define MT2701_PLL_DOMAIN_1_RATE 90316800
+#define MT2701_AUD_AUD_MUX1_DIV_RATE (MT2701_PLL_DOMAIN_0_RATE / 2)
+#define MT2701_AUD_AUD_MUX2_DIV_RATE (MT2701_PLL_DOMAIN_1_RATE / 2)
+
+enum {
+ MT2701_I2S_1,
+ MT2701_I2S_2,
+ MT2701_I2S_3,
+ MT2701_I2S_4,
+ MT2701_I2S_NUM,
+};
+
+enum {
+ MT2701_MEMIF_DL1,
+ MT2701_MEMIF_DL2,
+ MT2701_MEMIF_DL3,
+ MT2701_MEMIF_DL4,
+ MT2701_MEMIF_DL5,
+ MT2701_MEMIF_DL_SINGLE_NUM,
+ MT2701_MEMIF_DLM = MT2701_MEMIF_DL_SINGLE_NUM,
+ MT2701_MEMIF_UL1,
+ MT2701_MEMIF_UL2,
+ MT2701_MEMIF_UL3,
+ MT2701_MEMIF_UL4,
+ MT2701_MEMIF_UL5,
+ MT2701_MEMIF_DLBT,
+ MT2701_MEMIF_ULBT,
+ MT2701_MEMIF_NUM,
+ MT2701_IO_I2S = MT2701_MEMIF_NUM,
+ MT2701_IO_2ND_I2S,
+ MT2701_IO_3RD_I2S,
+ MT2701_IO_4TH_I2S,
+ MT2701_IO_5TH_I2S,
+ MT2701_IO_6TH_I2S,
+ MT2701_IO_MRG,
+};
+
+enum {
+ MT2701_IRQ_ASYS_START,
+ MT2701_IRQ_ASYS_IRQ1 = MT2701_IRQ_ASYS_START,
+ MT2701_IRQ_ASYS_IRQ2,
+ MT2701_IRQ_ASYS_IRQ3,
+ MT2701_IRQ_ASYS_END,
+};
+
+/* 2701 clock def */
+enum audio_system_clock_type {
+ MT2701_AUD_INFRA_SYS_AUDIO,
+ MT2701_AUD_AUD_MUX1_SEL,
+ MT2701_AUD_AUD_MUX2_SEL,
+ MT2701_AUD_AUD_MUX1_DIV,
+ MT2701_AUD_AUD_MUX2_DIV,
+ MT2701_AUD_AUD_48K_TIMING,
+ MT2701_AUD_AUD_44K_TIMING,
+ MT2701_AUD_AUDPLL_MUX_SEL,
+ MT2701_AUD_APLL_SEL,
+ MT2701_AUD_AUD1PLL_98M,
+ MT2701_AUD_AUD2PLL_90M,
+ MT2701_AUD_HADDS2PLL_98M,
+ MT2701_AUD_HADDS2PLL_294M,
+ MT2701_AUD_AUDPLL,
+ MT2701_AUD_AUDPLL_D4,
+ MT2701_AUD_AUDPLL_D8,
+ MT2701_AUD_AUDPLL_D16,
+ MT2701_AUD_AUDPLL_D24,
+ MT2701_AUD_AUDINTBUS,
+ MT2701_AUD_CLK_26M,
+ MT2701_AUD_SYSPLL1_D4,
+ MT2701_AUD_AUD_K1_SRC_SEL,
+ MT2701_AUD_AUD_K2_SRC_SEL,
+ MT2701_AUD_AUD_K3_SRC_SEL,
+ MT2701_AUD_AUD_K4_SRC_SEL,
+ MT2701_AUD_AUD_K5_SRC_SEL,
+ MT2701_AUD_AUD_K6_SRC_SEL,
+ MT2701_AUD_AUD_K1_SRC_DIV,
+ MT2701_AUD_AUD_K2_SRC_DIV,
+ MT2701_AUD_AUD_K3_SRC_DIV,
+ MT2701_AUD_AUD_K4_SRC_DIV,
+ MT2701_AUD_AUD_K5_SRC_DIV,
+ MT2701_AUD_AUD_K6_SRC_DIV,
+ MT2701_AUD_AUD_I2S1_MCLK,
+ MT2701_AUD_AUD_I2S2_MCLK,
+ MT2701_AUD_AUD_I2S3_MCLK,
+ MT2701_AUD_AUD_I2S4_MCLK,
+ MT2701_AUD_AUD_I2S5_MCLK,
+ MT2701_AUD_AUD_I2S6_MCLK,
+ MT2701_AUD_ASM_M_SEL,
+ MT2701_AUD_ASM_H_SEL,
+ MT2701_AUD_UNIVPLL2_D4,
+ MT2701_AUD_UNIVPLL2_D2,
+ MT2701_AUD_SYSPLL_D5,
+ MT2701_CLOCK_NUM
+};
+
+static const unsigned int mt2701_afe_backup_list[] = {
+ AUDIO_TOP_CON0,
+ AUDIO_TOP_CON4,
+ AUDIO_TOP_CON5,
+ ASYS_TOP_CON,
+ AFE_CONN0,
+ AFE_CONN1,
+ AFE_CONN2,
+ AFE_CONN3,
+ AFE_CONN15,
+ AFE_CONN16,
+ AFE_CONN17,
+ AFE_CONN18,
+ AFE_CONN19,
+ AFE_CONN20,
+ AFE_CONN21,
+ AFE_CONN22,
+ AFE_DAC_CON0,
+ AFE_MEMIF_PBUF_SIZE,
+};
+
+struct snd_pcm_substream;
+struct mtk_base_irq_data;
+
+struct mt2701_i2s_data {
+ int i2s_ctrl_reg;
+ int i2s_pwn_shift;
+ int i2s_asrc_fs_shift;
+ int i2s_asrc_fs_mask;
+};
+
+enum mt2701_i2s_dir {
+ I2S_OUT,
+ I2S_IN,
+ I2S_DIR_NUM,
+};
+
+struct mt2701_i2s_path {
+ int dai_id;
+ int mclk_rate;
+ int on[I2S_DIR_NUM];
+ int occupied[I2S_DIR_NUM];
+ const struct mt2701_i2s_data *i2s_data[2];
+};
+
+struct mt2701_afe_private {
+ struct clk *clocks[MT2701_CLOCK_NUM];
+ struct mt2701_i2s_path i2s_path[MT2701_I2S_NUM];
+ bool mrg_enable[MT2701_STREAM_DIR_NUM];
+};
+
+#endif
--- /dev/null
+/*
+ * MediaTek ALSA SoC AFE platform driver for MT2701
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ * Ir Lian <ir.lian@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc.h>
+
+#include "mt2701-afe-common.h"
+
+#include "mt2701-afe-clock-ctrl.h"
+#include "../common/mtk-afe-platform-driver.h"
+#include "../common/mtk-afe-fe-dai.h"
+
+#define AFE_IRQ_STATUS_BITS 0xff
+
+static const struct snd_pcm_hardware mt2701_afe_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED
+ | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE,
+ .period_bytes_min = 1024,
+ .period_bytes_max = 1024 * 256,
+ .periods_min = 4,
+ .periods_max = 1024,
+ .buffer_bytes_max = 1024 * 1024 * 16,
+ .fifo_size = 0,
+};
+
+struct mt2701_afe_rate {
+ unsigned int rate;
+ unsigned int regvalue;
+};
+
+static const struct mt2701_afe_rate mt2701_afe_i2s_rates[] = {
+ { .rate = 8000, .regvalue = 0 },
+ { .rate = 12000, .regvalue = 1 },
+ { .rate = 16000, .regvalue = 2 },
+ { .rate = 24000, .regvalue = 3 },
+ { .rate = 32000, .regvalue = 4 },
+ { .rate = 48000, .regvalue = 5 },
+ { .rate = 96000, .regvalue = 6 },
+ { .rate = 192000, .regvalue = 7 },
+ { .rate = 384000, .regvalue = 8 },
+ { .rate = 7350, .regvalue = 16 },
+ { .rate = 11025, .regvalue = 17 },
+ { .rate = 14700, .regvalue = 18 },
+ { .rate = 22050, .regvalue = 19 },
+ { .rate = 29400, .regvalue = 20 },
+ { .rate = 44100, .regvalue = 21 },
+ { .rate = 88200, .regvalue = 22 },
+ { .rate = 176400, .regvalue = 23 },
+ { .rate = 352800, .regvalue = 24 },
+};
+
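+/* Map a BE DAI id (MT2701_IO_I2S..) to a zero-based index into i2s_path[]. */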
+static int mt2701_dai_num_to_i2s(struct mtk_base_afe *afe, int num)
+{
+ int val = num - MT2701_IO_I2S;
+
+ if (val < 0 || val >= MT2701_I2S_NUM) {
+ dev_err(afe->dev, "%s, num not available, num %d, val %d\n",
+ __func__, num, val);
+ return -EINVAL;
+ }
+ return val;
+}
+
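+/* Translate a sample rate to its register encoding, or -EINVAL. */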
+static int mt2701_afe_i2s_fs(unsigned int sample_rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt2701_afe_i2s_rates); i++)
+ if (mt2701_afe_i2s_rates[i].rate == sample_rate)
+ return mt2701_afe_i2s_rates[i].regvalue;
+
+ return -EINVAL;
+}
+
+static int mt2701_afe_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
+ int clk_num = MT2701_AUD_AUD_I2S1_MCLK + i2s_num;
+ int ret = 0;
+
+ if (i2s_num < 0)
+ return i2s_num;
+
+ /* enable mclk */
+ ret = clk_prepare_enable(afe_priv->clocks[clk_num]);
+ if (ret)
+ dev_err(afe->dev, "Failed to enable mclk for I2S: %d\n",
+ i2s_num);
+
+ return ret;
+}
+
+static int mt2701_afe_i2s_path_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai,
+ int dir_invert)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
+ struct mt2701_i2s_path *i2s_path;
+ const struct mt2701_i2s_data *i2s_data;
+ int stream_dir = substream->stream;
+
+ if (i2s_num < 0)
+ return i2s_num;
+
+ i2s_path = &afe_priv->i2s_path[i2s_num];
+
+ if (dir_invert) {
+ if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
+ stream_dir = SNDRV_PCM_STREAM_CAPTURE;
+ else
+ stream_dir = SNDRV_PCM_STREAM_PLAYBACK;
+ }
+ i2s_data = i2s_path->i2s_data[stream_dir];
+
+ i2s_path->on[stream_dir]--;
+ if (i2s_path->on[stream_dir] < 0) {
+ dev_warn(afe->dev, "i2s_path->on: %d, dir: %d\n",
+ i2s_path->on[stream_dir], stream_dir);
+ i2s_path->on[stream_dir] = 0;
+ }
+ if (i2s_path->on[stream_dir])
+ return 0;
+
+ /* disable i2s */
+ regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
+ ASYS_I2S_CON_I2S_EN, 0);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ 1 << i2s_data->i2s_pwn_shift,
+ 1 << i2s_data->i2s_pwn_shift);
+ return 0;
+}
+
+static void mt2701_afe_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
+ struct mt2701_i2s_path *i2s_path;
+ int clk_num = MT2701_AUD_AUD_I2S1_MCLK + i2s_num;
+
+ if (i2s_num < 0)
+ return;
+
+ i2s_path = &afe_priv->i2s_path[i2s_num];
+
+ if (i2s_path->occupied[substream->stream])
+ i2s_path->occupied[substream->stream] = 0;
+ else
+ goto I2S_UNSTART;
+
+ mt2701_afe_i2s_path_shutdown(substream, dai, 0);
+
+ /* need to disable i2s-out path when disable i2s-in */
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ mt2701_afe_i2s_path_shutdown(substream, dai, 1);
+
+I2S_UNSTART:
+ /* disable mclk */
+ clk_disable_unprepare(afe_priv->clocks[clk_num]);
+}
+
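+/*
+ * Reference-counted I2S path power-up. With dir_invert set, the
+ * opposite stream direction is configured instead; capture also brings
+ * up the playback path (see mt2701_afe_i2s_prepare).
+ */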
+static int mt2701_i2s_path_prepare_enable(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai,
+ int dir_invert)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
+ struct mt2701_i2s_path *i2s_path;
+ const struct mt2701_i2s_data *i2s_data;
+ struct snd_pcm_runtime * const runtime = substream->runtime;
+ int reg, fs, w_len = 1; /* only 64-bit bck is supported for now */
+ int stream_dir = substream->stream;
+ unsigned int mask = 0, val = 0;
+
+ if (i2s_num < 0)
+ return i2s_num;
+
+ i2s_path = &afe_priv->i2s_path[i2s_num];
+
+ if (dir_invert) {
+ if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
+ stream_dir = SNDRV_PCM_STREAM_CAPTURE;
+ else
+ stream_dir = SNDRV_PCM_STREAM_PLAYBACK;
+ }
+ i2s_data = i2s_path->i2s_data[stream_dir];
+
+ /* no need to enable if already done */
+ i2s_path->on[stream_dir]++;
+
+ if (i2s_path->on[stream_dir] != 1)
+ return 0;
+
+ fs = mt2701_afe_i2s_fs(runtime->rate);
+
+ mask = ASYS_I2S_CON_FS |
+ ASYS_I2S_CON_I2S_COUPLE_MODE | /* 0 */
+ ASYS_I2S_CON_I2S_MODE |
+ ASYS_I2S_CON_WIDE_MODE;
+
+ val = ASYS_I2S_CON_FS_SET(fs) |
+ ASYS_I2S_CON_I2S_MODE |
+ ASYS_I2S_CON_WIDE_MODE_SET(w_len);
+
+ if (stream_dir == SNDRV_PCM_STREAM_CAPTURE) {
+ mask |= ASYS_I2S_IN_PHASE_FIX;
+ val |= ASYS_I2S_IN_PHASE_FIX;
+ }
+
+ regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg, mask, val);
+
+ if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
+ reg = ASMO_TIMING_CON1;
+ else
+ reg = ASMI_TIMING_CON1;
+
+ regmap_update_bits(afe->regmap, reg,
+ i2s_data->i2s_asrc_fs_mask
+ << i2s_data->i2s_asrc_fs_shift,
+ fs << i2s_data->i2s_asrc_fs_shift);
+
+ /* enable i2s */
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ 1 << i2s_data->i2s_pwn_shift,
+ 0 << i2s_data->i2s_pwn_shift);
+
+ /* reset i2s hw status before enable */
+ regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
+ ASYS_I2S_CON_RESET, ASYS_I2S_CON_RESET);
+ udelay(1);
+ regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
+ ASYS_I2S_CON_RESET, 0);
+ udelay(1);
+ regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
+ ASYS_I2S_CON_I2S_EN, ASYS_I2S_CON_I2S_EN);
+ return 0;
+}
+
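+/*
+ * Choose the PLL domain whose rate the requested MCLK divides evenly:
+ * domain 0 runs at 98.304 MHz (48 kHz family), domain 1 at 90.3168 MHz
+ * (44.1 kHz family).
+ */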
+static int mt2701_afe_i2s_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int clk_domain;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
+ struct mt2701_i2s_path *i2s_path;
+ int mclk_rate;
+
+ if (i2s_num < 0)
+ return i2s_num;
+
+ i2s_path = &afe_priv->i2s_path[i2s_num];
+ mclk_rate = i2s_path->mclk_rate;
+
+ if (i2s_path->occupied[substream->stream])
+ return -EBUSY;
+ i2s_path->occupied[substream->stream] = 1;
+
+ if (MT2701_PLL_DOMAIN_0_RATE % mclk_rate == 0) {
+ clk_domain = 0;
+ } else if (MT2701_PLL_DOMAIN_1_RATE % mclk_rate == 0) {
+ clk_domain = 1;
+ } else {
+ dev_err(dai->dev, "%s() bad mclk rate %d\n",
+ __func__, mclk_rate);
+ return -EINVAL;
+ }
+ mt2701_mclk_configuration(afe, i2s_num, clk_domain, mclk_rate);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ mt2701_i2s_path_prepare_enable(substream, dai, 0);
+ } else {
+ /* need to enable i2s-out path when enable i2s-in */
+ /* prepare for another direction "out" */
+ mt2701_i2s_path_prepare_enable(substream, dai, 1);
+ /* prepare for "in" */
+ mt2701_i2s_path_prepare_enable(substream, dai, 0);
+ }
+
+ return 0;
+}
+
+static int mt2701_afe_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
+
+ if (i2s_num < 0)
+ return i2s_num;
+
+ /* mclk */
+ if (dir == SND_SOC_CLOCK_IN) {
+ dev_warn(dai->dev,
+ "%s() warning: mt2701 doesn't support mclk input\n",
+ __func__);
+ return -EINVAL;
+ }
+ afe_priv->i2s_path[i2s_num].mclk_rate = freq;
+ return 0;
+}
+
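+/* BT merge (MRG) interface: power it up and record the active direction. */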
+static int mt2701_btmrg_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_MRGIF, 0);
+
+ afe_priv->mrg_enable[substream->stream] = 1;
+ return 0;
+}
+
+static int mt2701_btmrg_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ int stream_fs;
+ u32 val, msk;
+
+ stream_fs = params_rate(params);
+
+ if ((stream_fs != 8000) && (stream_fs != 16000)) {
+ dev_err(afe->dev, "%s() btmgr not supprt this stream_fs %d\n",
+ __func__, stream_fs);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
+ AFE_MRGIF_CON_I2S_MODE_MASK,
+ AFE_MRGIF_CON_I2S_MODE_32K);
+
+ val = AFE_DAIBT_CON0_BT_FUNC_EN | AFE_DAIBT_CON0_BT_FUNC_RDY
+ | AFE_DAIBT_CON0_MRG_USE;
+ msk = val;
+
+ if (stream_fs == 16000)
+ val |= AFE_DAIBT_CON0_BT_WIDE_MODE_EN;
+
+ msk |= AFE_DAIBT_CON0_BT_WIDE_MODE_EN;
+
+ regmap_update_bits(afe->regmap, AFE_DAIBT_CON0, msk, val);
+
+ regmap_update_bits(afe->regmap, AFE_DAIBT_CON0,
+ AFE_DAIBT_CON0_DAIBT_EN,
+ AFE_DAIBT_CON0_DAIBT_EN);
+ regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
+ AFE_MRGIF_CON_MRG_I2S_EN,
+ AFE_MRGIF_CON_MRG_I2S_EN);
+ regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
+ AFE_MRGIF_CON_MRG_EN,
+ AFE_MRGIF_CON_MRG_EN);
+ return 0;
+}
+
+static void mt2701_btmrg_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+
+ /* if the other direction stream is not occupied */
+ if (!afe_priv->mrg_enable[!substream->stream]) {
+ regmap_update_bits(afe->regmap, AFE_DAIBT_CON0,
+ AFE_DAIBT_CON0_DAIBT_EN, 0);
+ regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
+ AFE_MRGIF_CON_MRG_EN, 0);
+ regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
+ AFE_MRGIF_CON_MRG_I2S_EN, 0);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_MRGIF,
+ AUDIO_TOP_CON4_PDN_MRGIF);
+ }
+ afe_priv->mrg_enable[substream->stream] = 0;
+}
+
+static int mt2701_simple_fe_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ int stream_dir = substream->stream;
+ int memif_num = rtd->cpu_dai->id;
+ struct mtk_base_afe_memif *memif_tmp;
+
+ /* can't run single DL & DLM at the same time */
+ if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK) {
+ memif_tmp = &afe->memif[MT2701_MEMIF_DLM];
+ if (memif_tmp->substream) {
+ dev_warn(afe->dev, "%s memif is not available, stream_dir %d, memif_num %d\n",
+ __func__, stream_dir, memif_num);
+ return -EBUSY;
+ }
+ }
+ return mtk_afe_fe_startup(substream, dai);
+}
+
+static int mt2701_simple_fe_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ int stream_dir = substream->stream;
+
+ /* single DL use PAIR_INTERLEAVE */
+ if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_update_bits(afe->regmap,
+ AFE_MEMIF_PBUF_SIZE,
+ AFE_MEMIF_PBUF_SIZE_DLM_MASK,
+ AFE_MEMIF_PBUF_SIZE_PAIR_INTERLEAVE);
+ }
+ return mtk_afe_fe_hw_params(substream, params, dai);
+}
+
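+/*
+ * The multi-channel DLM memif shares hardware with the single DL
+ * memifs: refuse to start while any single DL runs, then re-enable
+ * every single-DL agent, which the hardware design requires for DLM.
+ */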
+static int mt2701_dlm_fe_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif_tmp;
+ const struct mtk_base_memif_data *memif_data;
+ int i;
+
+ for (i = MT2701_MEMIF_DL1; i < MT2701_MEMIF_DL_SINGLE_NUM; ++i) {
+ memif_tmp = &afe->memif[i];
+ if (memif_tmp->substream)
+ return -EBUSY;
+ }
+
+ /* enable agent for all single DL memifs (due to hw design) */
+ for (i = MT2701_MEMIF_DL1; i < MT2701_MEMIF_DL_SINGLE_NUM; ++i) {
+ memif_data = afe->memif[i].data;
+ regmap_update_bits(afe->regmap,
+ memif_data->agent_disable_reg,
+ 1 << memif_data->agent_disable_shift,
+ 0 << memif_data->agent_disable_shift);
+ }
+
+ return mtk_afe_fe_startup(substream, dai);
+}
+
+static void mt2701_dlm_fe_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ const struct mtk_base_memif_data *memif_data;
+ int i;
+
+ for (i = MT2701_MEMIF_DL1; i < MT2701_MEMIF_DL_SINGLE_NUM; ++i) {
+ memif_data = afe->memif[i].data;
+ regmap_update_bits(afe->regmap,
+ memif_data->agent_disable_reg,
+ 1 << memif_data->agent_disable_shift,
+ 1 << memif_data->agent_disable_shift);
+ }
+ return mtk_afe_fe_shutdown(substream, dai);
+}
+
+static int mt2701_dlm_fe_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ int channels = params_channels(params);
+
+ regmap_update_bits(afe->regmap,
+ AFE_MEMIF_PBUF_SIZE,
+ AFE_MEMIF_PBUF_SIZE_DLM_MASK,
+ AFE_MEMIF_PBUF_SIZE_FULL_INTERLEAVE);
+ regmap_update_bits(afe->regmap,
+ AFE_MEMIF_PBUF_SIZE,
+ AFE_MEMIF_PBUF_SIZE_DLM_BYTE_MASK,
+ AFE_MEMIF_PBUF_SIZE_DLM_32BYTES);
+ regmap_update_bits(afe->regmap,
+ AFE_MEMIF_PBUF_SIZE,
+ AFE_MEMIF_PBUF_SIZE_DLM_CH_MASK,
+ AFE_MEMIF_PBUF_SIZE_DLM_CH(channels));
+
+ return mtk_afe_fe_hw_params(substream, params, dai);
+}
+
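+/*
+ * DLM playback additionally toggles DL1's enable bit around the generic
+ * FE trigger; the DL1 agent appears to gate the multi-channel path.
+ */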
+static int mt2701_dlm_fe_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif_tmp = &afe->memif[MT2701_MEMIF_DL1];
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ regmap_update_bits(afe->regmap, memif_tmp->data->enable_reg,
+ 1 << memif_tmp->data->enable_shift,
+ 1 << memif_tmp->data->enable_shift);
+ mtk_afe_fe_trigger(substream, cmd, dai);
+ return 0;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ mtk_afe_fe_trigger(substream, cmd, dai);
+ regmap_update_bits(afe->regmap, memif_tmp->data->enable_reg,
+ 1 << memif_tmp->data->enable_shift, 0);
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
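+/* ULBT uses a 1-bit 16k/8k select; other memifs use the I2S encoding. */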
+static int mt2701_memif_fs(struct snd_pcm_substream *substream,
+ unsigned int rate)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int fs;
+
+ if (rtd->cpu_dai->id != MT2701_MEMIF_ULBT)
+ fs = mt2701_afe_i2s_fs(rate);
+ else
+ fs = (rate == 16000 ? 1 : 0);
+ return fs;
+}
+
+static int mt2701_irq_fs(struct snd_pcm_substream *substream, unsigned int rate)
+{
+ return mt2701_afe_i2s_fs(rate);
+}
+
+/* FE DAIs */
+static const struct snd_soc_dai_ops mt2701_single_memif_dai_ops = {
+ .startup = mt2701_simple_fe_startup,
+ .shutdown = mtk_afe_fe_shutdown,
+ .hw_params = mt2701_simple_fe_hw_params,
+ .hw_free = mtk_afe_fe_hw_free,
+ .prepare = mtk_afe_fe_prepare,
+ .trigger = mtk_afe_fe_trigger,
+};
+
+static const struct snd_soc_dai_ops mt2701_dlm_memif_dai_ops = {
+ .startup = mt2701_dlm_fe_startup,
+ .shutdown = mt2701_dlm_fe_shutdown,
+ .hw_params = mt2701_dlm_fe_hw_params,
+ .hw_free = mtk_afe_fe_hw_free,
+ .prepare = mtk_afe_fe_prepare,
+ .trigger = mt2701_dlm_fe_trigger,
+};
+
+/* I2S BE DAIs */
+static const struct snd_soc_dai_ops mt2701_afe_i2s_ops = {
+ .startup = mt2701_afe_i2s_startup,
+ .shutdown = mt2701_afe_i2s_shutdown,
+ .prepare = mt2701_afe_i2s_prepare,
+ .set_sysclk = mt2701_afe_i2s_set_sysclk,
+};
+
+/* MRG BE DAIs */
+static const struct snd_soc_dai_ops mt2701_btmrg_ops = {
+ .startup = mt2701_btmrg_startup,
+ .shutdown = mt2701_btmrg_shutdown,
+ .hw_params = mt2701_btmrg_hw_params,
+};
+
+static struct snd_soc_dai_driver mt2701_afe_pcm_dais[] = {
+ /* FE DAIs: memory interfaces to CPU */
+ {
+ .name = "PCM_multi",
+ .id = MT2701_MEMIF_DLM,
+ .suspend = mtk_afe_dai_suspend,
+ .resume = mtk_afe_dai_resume,
+ .playback = {
+ .stream_name = "DLM",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .ops = &mt2701_dlm_memif_dai_ops,
+ },
+ {
+ .name = "PCM0",
+ .id = MT2701_MEMIF_UL1,
+ .suspend = mtk_afe_dai_suspend,
+ .resume = mtk_afe_dai_resume,
+ .capture = {
+ .stream_name = "UL1",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .ops = &mt2701_single_memif_dai_ops,
+ },
+ {
+ .name = "PCM1",
+ .id = MT2701_MEMIF_UL2,
+ .suspend = mtk_afe_dai_suspend,
+ .resume = mtk_afe_dai_resume,
+ .capture = {
+ .stream_name = "UL2",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .ops = &mt2701_single_memif_dai_ops,
+ },
+ {
+ .name = "PCM_BT_DL",
+ .id = MT2701_MEMIF_DLBT,
+ .suspend = mtk_afe_dai_suspend,
+ .resume = mtk_afe_dai_resume,
+ .playback = {
+ .stream_name = "DLBT",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = (SNDRV_PCM_RATE_8000
+ | SNDRV_PCM_RATE_16000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &mt2701_single_memif_dai_ops,
+ },
+ {
+ .name = "PCM_BT_UL",
+ .id = MT2701_MEMIF_ULBT,
+ .suspend = mtk_afe_dai_suspend,
+ .resume = mtk_afe_dai_resume,
+ .capture = {
+ .stream_name = "ULBT",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = (SNDRV_PCM_RATE_8000
+ | SNDRV_PCM_RATE_16000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &mt2701_single_memif_dai_ops,
+ },
+ /* BE DAIs */
+ {
+ .name = "I2S0",
+ .id = MT2701_IO_I2S,
+ .playback = {
+ .stream_name = "I2S0 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .capture = {
+ .stream_name = "I2S0 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .ops = &mt2701_afe_i2s_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "I2S1",
+ .id = MT2701_IO_2ND_I2S,
+ .playback = {
+ .stream_name = "I2S1 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .capture = {
+ .stream_name = "I2S1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .ops = &mt2701_afe_i2s_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "I2S2",
+ .id = MT2701_IO_3RD_I2S,
+ .playback = {
+ .stream_name = "I2S2 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .capture = {
+ .stream_name = "I2S2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .ops = &mt2701_afe_i2s_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "I2S3",
+ .id = MT2701_IO_4TH_I2S,
+ .playback = {
+ .stream_name = "I2S3 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .capture = {
+ .stream_name = "I2S3 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .ops = &mt2701_afe_i2s_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "MRG BT",
+ .id = MT2701_IO_MRG,
+ .playback = {
+ .stream_name = "BT Playback",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = (SNDRV_PCM_RATE_8000
+ | SNDRV_PCM_RATE_16000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "BT Capture",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = (SNDRV_PCM_RATE_8000
+ | SNDRV_PCM_RATE_16000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &mt2701_btmrg_ops,
+ .symmetric_rates = 1,
+ },
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o00_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I00 Switch", AFE_CONN0, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o01_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I01 Switch", AFE_CONN1, 1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o02_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I02 Switch", AFE_CONN2, 2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o03_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I03 Switch", AFE_CONN3, 3, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o14_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I26 Switch", AFE_CONN14, 26, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o15_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I12 Switch", AFE_CONN15, 12, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o16_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I13 Switch", AFE_CONN16, 13, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o17_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I14 Switch", AFE_CONN17, 14, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o18_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I15 Switch", AFE_CONN18, 15, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o19_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I16 Switch", AFE_CONN19, 16, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o20_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I17 Switch", AFE_CONN20, 17, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o21_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I18 Switch", AFE_CONN21, 18, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o22_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I19 Switch", AFE_CONN22, 19, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o23_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I20 Switch", AFE_CONN23, 20, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o24_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I21 Switch", AFE_CONN24, 21, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o31_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I35 Switch", AFE_CONN41, 9, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_i02_mix[] = {
+ SOC_DAPM_SINGLE("I2S0 Switch", SND_SOC_NOPM, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s0[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S0 Out Switch",
+ ASYS_I2SO1_CON, 26, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s1[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S1 Out Switch",
+ ASYS_I2SO2_CON, 26, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s2[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S2 Out Switch",
+ PWR2_TOP_CON, 17, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s3[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S3 Out Switch",
+ PWR2_TOP_CON, 18, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s4[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S4 Out Switch",
+ PWR2_TOP_CON, 19, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc0[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Asrc0 out Switch", AUDIO_TOP_CON4, 14, 1,
+ 1),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc1[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Asrc1 out Switch", AUDIO_TOP_CON4, 15, 1,
+ 1),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc2[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Asrc2 out Switch", PWR2_TOP_CON, 6, 1,
+ 1),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc3[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Asrc3 out Switch", PWR2_TOP_CON, 7, 1,
+ 1),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc4[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Asrc4 out Switch", PWR2_TOP_CON, 8, 1,
+ 1),
+};
+
+static const struct snd_soc_dapm_widget mt2701_afe_pcm_widgets[] = {
+ /* inter-connections */
+ SND_SOC_DAPM_MIXER("I00", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I01", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I02", SND_SOC_NOPM, 0, 0, mt2701_afe_i02_mix,
+ ARRAY_SIZE(mt2701_afe_i02_mix)),
+ SND_SOC_DAPM_MIXER("I03", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I12", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I13", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I14", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I15", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I16", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I17", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I18", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I19", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I26", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I35", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("O00", SND_SOC_NOPM, 0, 0, mt2701_afe_o00_mix,
+ ARRAY_SIZE(mt2701_afe_o00_mix)),
+ SND_SOC_DAPM_MIXER("O01", SND_SOC_NOPM, 0, 0, mt2701_afe_o01_mix,
+ ARRAY_SIZE(mt2701_afe_o01_mix)),
+ SND_SOC_DAPM_MIXER("O02", SND_SOC_NOPM, 0, 0, mt2701_afe_o02_mix,
+ ARRAY_SIZE(mt2701_afe_o02_mix)),
+ SND_SOC_DAPM_MIXER("O03", SND_SOC_NOPM, 0, 0, mt2701_afe_o03_mix,
+ ARRAY_SIZE(mt2701_afe_o03_mix)),
+ SND_SOC_DAPM_MIXER("O14", SND_SOC_NOPM, 0, 0, mt2701_afe_o14_mix,
+ ARRAY_SIZE(mt2701_afe_o14_mix)),
+ SND_SOC_DAPM_MIXER("O15", SND_SOC_NOPM, 0, 0, mt2701_afe_o15_mix,
+ ARRAY_SIZE(mt2701_afe_o15_mix)),
+ SND_SOC_DAPM_MIXER("O16", SND_SOC_NOPM, 0, 0, mt2701_afe_o16_mix,
+ ARRAY_SIZE(mt2701_afe_o16_mix)),
+ SND_SOC_DAPM_MIXER("O17", SND_SOC_NOPM, 0, 0, mt2701_afe_o17_mix,
+ ARRAY_SIZE(mt2701_afe_o17_mix)),
+ SND_SOC_DAPM_MIXER("O18", SND_SOC_NOPM, 0, 0, mt2701_afe_o18_mix,
+ ARRAY_SIZE(mt2701_afe_o18_mix)),
+ SND_SOC_DAPM_MIXER("O19", SND_SOC_NOPM, 0, 0, mt2701_afe_o19_mix,
+ ARRAY_SIZE(mt2701_afe_o19_mix)),
+ SND_SOC_DAPM_MIXER("O20", SND_SOC_NOPM, 0, 0, mt2701_afe_o20_mix,
+ ARRAY_SIZE(mt2701_afe_o20_mix)),
+ SND_SOC_DAPM_MIXER("O21", SND_SOC_NOPM, 0, 0, mt2701_afe_o21_mix,
+ ARRAY_SIZE(mt2701_afe_o21_mix)),
+ SND_SOC_DAPM_MIXER("O22", SND_SOC_NOPM, 0, 0, mt2701_afe_o22_mix,
+ ARRAY_SIZE(mt2701_afe_o22_mix)),
+ SND_SOC_DAPM_MIXER("O31", SND_SOC_NOPM, 0, 0, mt2701_afe_o31_mix,
+ ARRAY_SIZE(mt2701_afe_o31_mix)),
+
+ SND_SOC_DAPM_MIXER("I12I13", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_i2s0,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s0)),
+ SND_SOC_DAPM_MIXER("I14I15", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_i2s1,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s1)),
+ SND_SOC_DAPM_MIXER("I16I17", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_i2s2,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s2)),
+ SND_SOC_DAPM_MIXER("I18I19", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_i2s3,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s3)),
+
+ SND_SOC_DAPM_MIXER("ASRC_O0", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_asrc0,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc0)),
+ SND_SOC_DAPM_MIXER("ASRC_O1", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_asrc1,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc1)),
+ SND_SOC_DAPM_MIXER("ASRC_O2", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_asrc2,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc2)),
+ SND_SOC_DAPM_MIXER("ASRC_O3", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_asrc3,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc3)),
+};
+
+static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
+ {"I12", NULL, "DL1"},
+ {"I13", NULL, "DL1"},
+ {"I35", NULL, "DLBT"},
+
+ {"I2S0 Playback", NULL, "O15"},
+ {"I2S0 Playback", NULL, "O16"},
+
+ {"I2S1 Playback", NULL, "O17"},
+ {"I2S1 Playback", NULL, "O18"},
+ {"I2S2 Playback", NULL, "O19"},
+ {"I2S2 Playback", NULL, "O20"},
+ {"I2S3 Playback", NULL, "O21"},
+ {"I2S3 Playback", NULL, "O22"},
+ {"BT Playback", NULL, "O31"},
+
+ {"UL1", NULL, "O00"},
+ {"UL1", NULL, "O01"},
+ {"UL2", NULL, "O02"},
+ {"UL2", NULL, "O03"},
+ {"ULBT", NULL, "O14"},
+
+ {"I00", NULL, "I2S0 Capture"},
+ {"I01", NULL, "I2S0 Capture"},
+
+ {"I02", NULL, "I2S1 Capture"},
+ {"I03", NULL, "I2S1 Capture"},
+ /* I02,03 link to UL2, also need to open I2S0 */
+ {"I02", "I2S0 Switch", "I2S0 Capture"},
+
+ {"I26", NULL, "BT Capture"},
+
+ {"ASRC_O0", "Asrc0 out Switch", "DLM"},
+ {"ASRC_O1", "Asrc1 out Switch", "DLM"},
+ {"ASRC_O2", "Asrc2 out Switch", "DLM"},
+ {"ASRC_O3", "Asrc3 out Switch", "DLM"},
+
+ {"I12I13", "Multich I2S0 Out Switch", "ASRC_O0"},
+ {"I14I15", "Multich I2S1 Out Switch", "ASRC_O1"},
+ {"I16I17", "Multich I2S2 Out Switch", "ASRC_O2"},
+ {"I18I19", "Multich I2S3 Out Switch", "ASRC_O3"},
+
+ { "I12", NULL, "I12I13" },
+ { "I13", NULL, "I12I13" },
+ { "I14", NULL, "I14I15" },
+ { "I15", NULL, "I14I15" },
+ { "I16", NULL, "I16I17" },
+ { "I17", NULL, "I16I17" },
+ { "I18", NULL, "I18I19" },
+ { "I19", NULL, "I18I19" },
+
+ { "O00", "I00 Switch", "I00" },
+ { "O01", "I01 Switch", "I01" },
+ { "O02", "I02 Switch", "I02" },
+ { "O03", "I03 Switch", "I03" },
+ { "O14", "I26 Switch", "I26" },
+ { "O15", "I12 Switch", "I12" },
+ { "O16", "I13 Switch", "I13" },
+ { "O17", "I14 Switch", "I14" },
+ { "O18", "I15 Switch", "I15" },
+ { "O19", "I16 Switch", "I16" },
+ { "O20", "I17 Switch", "I17" },
+ { "O21", "I18 Switch", "I18" },
+ { "O22", "I19 Switch", "I19" },
+ { "O31", "I35 Switch", "I35" },
+};
+
+static const struct snd_soc_component_driver mt2701_afe_pcm_dai_component = {
+ .name = "mt2701-afe-pcm-dai",
+ .dapm_widgets = mt2701_afe_pcm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt2701_afe_pcm_widgets),
+ .dapm_routes = mt2701_afe_pcm_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt2701_afe_pcm_routes),
+};
+
+static const struct mtk_base_memif_data memif_data[MT2701_MEMIF_NUM] = {
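+	/* one entry per DMA memif; -1 marks a control the memif lacks */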
+ {
+ .name = "DL1",
+ .id = MT2701_MEMIF_DL1,
+ .reg_ofs_base = AFE_DL1_BASE,
+ .reg_ofs_cur = AFE_DL1_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 0,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON3,
+ .mono_shift = 16,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 1,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 0,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 6,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "DL2",
+ .id = MT2701_MEMIF_DL2,
+ .reg_ofs_base = AFE_DL2_BASE,
+ .reg_ofs_cur = AFE_DL2_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 5,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON3,
+ .mono_shift = 17,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 2,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 2,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 7,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "DL3",
+ .id = MT2701_MEMIF_DL3,
+ .reg_ofs_base = AFE_DL3_BASE,
+ .reg_ofs_cur = AFE_DL3_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 10,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON3,
+ .mono_shift = 18,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 3,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 4,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 8,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "DL4",
+ .id = MT2701_MEMIF_DL4,
+ .reg_ofs_base = AFE_DL4_BASE,
+ .reg_ofs_cur = AFE_DL4_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 15,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON3,
+ .mono_shift = 19,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 4,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 6,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 9,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "DL5",
+ .id = MT2701_MEMIF_DL5,
+ .reg_ofs_base = AFE_DL5_BASE,
+ .reg_ofs_cur = AFE_DL5_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 20,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON3,
+ .mono_shift = 20,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 5,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 8,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 10,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "DLM",
+ .id = MT2701_MEMIF_DLM,
+ .reg_ofs_base = AFE_DLMCH_BASE,
+ .reg_ofs_cur = AFE_DLMCH_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 0,
+ .fs_maskbit = 0x1f,
+ .mono_reg = -1,
+ .mono_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 7,
+ .hd_reg = AFE_MEMIF_PBUF_SIZE,
+ .hd_shift = 28,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 12,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "UL1",
+ .id = MT2701_MEMIF_UL1,
+ .reg_ofs_base = AFE_VUL_BASE,
+ .reg_ofs_cur = AFE_VUL_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = 0,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON4,
+ .mono_shift = 0,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 10,
+ .hd_reg = AFE_MEMIF_HD_CON1,
+ .hd_shift = 0,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 0,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "UL2",
+ .id = MT2701_MEMIF_UL2,
+ .reg_ofs_base = AFE_UL2_BASE,
+ .reg_ofs_cur = AFE_UL2_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = 5,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON4,
+ .mono_shift = 2,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 11,
+ .hd_reg = AFE_MEMIF_HD_CON1,
+ .hd_shift = 2,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "UL3",
+ .id = MT2701_MEMIF_UL3,
+ .reg_ofs_base = AFE_UL3_BASE,
+ .reg_ofs_cur = AFE_UL3_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = 10,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON4,
+ .mono_shift = 4,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 12,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 0,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 2,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "UL4",
+ .id = MT2701_MEMIF_UL4,
+ .reg_ofs_base = AFE_UL4_BASE,
+ .reg_ofs_cur = AFE_UL4_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = 15,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON4,
+ .mono_shift = 6,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 13,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 6,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 3,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "UL5",
+ .id = MT2701_MEMIF_UL5,
+ .reg_ofs_base = AFE_UL5_BASE,
+ .reg_ofs_cur = AFE_UL5_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = 20,
+ .mono_reg = AFE_DAC_CON4,
+ .mono_shift = 8,
+ .fs_maskbit = 0x1f,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 14,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 8,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 4,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "DLBT",
+ .id = MT2701_MEMIF_DLBT,
+ .reg_ofs_base = AFE_ARB1_BASE,
+ .reg_ofs_cur = AFE_ARB1_CUR,
+ .fs_reg = AFE_DAC_CON3,
+ .fs_shift = 10,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON3,
+ .mono_shift = 22,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 8,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 14,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 13,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "ULBT",
+ .id = MT2701_MEMIF_ULBT,
+ .reg_ofs_base = AFE_DAI_BASE,
+ .reg_ofs_cur = AFE_DAI_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = 30,
+ .fs_maskbit = 0x1,
+ .mono_reg = -1,
+ .mono_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 17,
+ .hd_reg = AFE_MEMIF_HD_CON1,
+ .hd_shift = 20,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 16,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+};
+
+static const struct mtk_base_irq_data irq_data[MT2701_IRQ_ASYS_END] = {
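+	/* the three ASYS timer IRQs share ASYS_IRQ_CLR, one clear bit each */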
+ {
+ .id = MT2701_IRQ_ASYS_IRQ1,
+ .irq_cnt_reg = ASYS_IRQ1_CON,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0xffffff,
+ .irq_fs_reg = ASYS_IRQ1_CON,
+ .irq_fs_shift = 24,
+ .irq_fs_maskbit = 0x1f,
+ .irq_en_reg = ASYS_IRQ1_CON,
+ .irq_en_shift = 31,
+ .irq_clr_reg = ASYS_IRQ_CLR,
+ .irq_clr_shift = 0,
+ },
+ {
+ .id = MT2701_IRQ_ASYS_IRQ2,
+ .irq_cnt_reg = ASYS_IRQ2_CON,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0xffffff,
+ .irq_fs_reg = ASYS_IRQ2_CON,
+ .irq_fs_shift = 24,
+ .irq_fs_maskbit = 0x1f,
+ .irq_en_reg = ASYS_IRQ2_CON,
+ .irq_en_shift = 31,
+ .irq_clr_reg = ASYS_IRQ_CLR,
+ .irq_clr_shift = 1,
+ },
+ {
+ .id = MT2701_IRQ_ASYS_IRQ3,
+ .irq_cnt_reg = ASYS_IRQ3_CON,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0xffffff,
+ .irq_fs_reg = ASYS_IRQ3_CON,
+ .irq_fs_shift = 24,
+ .irq_fs_maskbit = 0x1f,
+ .irq_en_reg = ASYS_IRQ3_CON,
+ .irq_en_shift = 31,
+ .irq_clr_reg = ASYS_IRQ_CLR,
+ .irq_clr_shift = 2,
+ }
+};
+
+static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
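+	/* indexed by [I2S port][I2S_OUT / I2S_IN] */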
+ {
+ {
+ .i2s_ctrl_reg = ASYS_I2SO1_CON,
+ .i2s_pwn_shift = 6,
+ .i2s_asrc_fs_shift = 0,
+ .i2s_asrc_fs_mask = 0x1f,
+ },
+ {
+ .i2s_ctrl_reg = ASYS_I2SIN1_CON,
+ .i2s_pwn_shift = 0,
+ .i2s_asrc_fs_shift = 0,
+ .i2s_asrc_fs_mask = 0x1f,
+ },
+ },
+ {
+ {
+ .i2s_ctrl_reg = ASYS_I2SO2_CON,
+ .i2s_pwn_shift = 7,
+ .i2s_asrc_fs_shift = 5,
+ .i2s_asrc_fs_mask = 0x1f,
+ },
+ {
+ .i2s_ctrl_reg = ASYS_I2SIN2_CON,
+ .i2s_pwn_shift = 1,
+ .i2s_asrc_fs_shift = 5,
+ .i2s_asrc_fs_mask = 0x1f,
+ },
+ },
+ {
+ {
+ .i2s_ctrl_reg = ASYS_I2SO3_CON,
+ .i2s_pwn_shift = 8,
+ .i2s_asrc_fs_shift = 10,
+ .i2s_asrc_fs_mask = 0x1f,
+ },
+ {
+ .i2s_ctrl_reg = ASYS_I2SIN3_CON,
+ .i2s_pwn_shift = 2,
+ .i2s_asrc_fs_shift = 10,
+ .i2s_asrc_fs_mask = 0x1f,
+ },
+ },
+ {
+ {
+ .i2s_ctrl_reg = ASYS_I2SO4_CON,
+ .i2s_pwn_shift = 9,
+ .i2s_asrc_fs_shift = 15,
+ .i2s_asrc_fs_mask = 0x1f,
+ },
+ {
+ .i2s_ctrl_reg = ASYS_I2SIN4_CON,
+ .i2s_pwn_shift = 3,
+ .i2s_asrc_fs_shift = 15,
+ .i2s_asrc_fs_mask = 0x1f,
+ },
+ },
+};
+
+static const struct regmap_config mt2701_afe_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = AFE_END_ADDR,
+ .cache_type = REGCACHE_NONE,
+};
+
+static irqreturn_t mt2701_asys_isr(int irq_id, void *dev)
+{
+ int id;
+ struct mtk_base_afe *afe = dev;
+ struct mtk_base_afe_memif *memif;
+ struct mtk_base_afe_irq *irq;
+ u32 status;
+
+ regmap_read(afe->regmap, ASYS_IRQ_STATUS, &status);
+ regmap_write(afe->regmap, ASYS_IRQ_CLR, status);
+
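+	/* dispatch a period-elapsed event to each memif whose IRQ bit is set */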
+ for (id = 0; id < MT2701_MEMIF_NUM; ++id) {
+ memif = &afe->memif[id];
+ if (memif->irq_usage < 0)
+ continue;
+ irq = &afe->irqs[memif->irq_usage];
+ if (status & 1 << (irq->irq_data->irq_clr_shift))
+ snd_pcm_period_elapsed(memif->substream);
+ }
+ return IRQ_HANDLED;
+}
+
+static int mt2701_afe_runtime_suspend(struct device *dev)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dev);
+
+ mt2701_afe_disable_clock(afe);
+ return 0;
+}
+
+static int mt2701_afe_runtime_resume(struct device *dev)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dev);
+
+ return mt2701_afe_enable_clock(afe);
+}
+
+static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
+{
+ int ret, i;
+	int irq_id;
+ struct mtk_base_afe *afe;
+ struct mt2701_afe_private *afe_priv;
+ struct resource *res;
+ struct device *dev;
+
+ afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
+ if (!afe)
+ return -ENOMEM;
+ afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv),
+ GFP_KERNEL);
+ if (!afe->platform_priv)
+ return -ENOMEM;
+ afe_priv = afe->platform_priv;
+
+ afe->dev = &pdev->dev;
+ dev = afe->dev;
+
+	irq_id = platform_get_irq(pdev, 0);
+	if (irq_id <= 0) {
+		dev_err(dev, "%s no irq found\n", dev->of_node->name);
+		return irq_id < 0 ? irq_id : -ENXIO;
+	}
+ ret = devm_request_irq(dev, irq_id, mt2701_asys_isr,
+ IRQF_TRIGGER_NONE, "asys-isr", (void *)afe);
+ if (ret) {
+ dev_err(dev, "could not request_irq for asys-isr\n");
+ return ret;
+ }
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	afe->base_addr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(afe->base_addr))
+		return PTR_ERR(afe->base_addr);
+
+ afe->regmap = devm_regmap_init_mmio(&pdev->dev, afe->base_addr,
+ &mt2701_afe_regmap_config);
+ if (IS_ERR(afe->regmap))
+ return PTR_ERR(afe->regmap);
+
+ mutex_init(&afe->irq_alloc_lock);
+
+ /* memif initialize */
+ afe->memif_size = MT2701_MEMIF_NUM;
+	afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
+				  GFP_KERNEL);
+	if (!afe->memif)
+		return -ENOMEM;
+
+ for (i = 0; i < afe->memif_size; i++) {
+ afe->memif[i].data = &memif_data[i];
+ afe->memif[i].irq_usage = -1;
+ }
+
+ /* irq initialize */
+ afe->irqs_size = MT2701_IRQ_ASYS_END;
+	afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
+				 GFP_KERNEL);
+	if (!afe->irqs)
+		return -ENOMEM;
+
+ for (i = 0; i < afe->irqs_size; i++)
+ afe->irqs[i].irq_data = &irq_data[i];
+
+ /* I2S initialize */
+ for (i = 0; i < MT2701_I2S_NUM; i++) {
+ afe_priv->i2s_path[i].i2s_data[I2S_OUT]
+ = &mt2701_i2s_data[i][I2S_OUT];
+ afe_priv->i2s_path[i].i2s_data[I2S_IN]
+ = &mt2701_i2s_data[i][I2S_IN];
+ }
+
+ afe->mtk_afe_hardware = &mt2701_afe_hardware;
+ afe->memif_fs = mt2701_memif_fs;
+ afe->irq_fs = mt2701_irq_fs;
+
+ afe->reg_back_up_list = mt2701_afe_backup_list;
+ afe->reg_back_up_list_num = ARRAY_SIZE(mt2701_afe_backup_list);
+ afe->runtime_resume = mt2701_afe_runtime_resume;
+ afe->runtime_suspend = mt2701_afe_runtime_suspend;
+
+ /* initial audio related clock */
+ ret = mt2701_init_clock(afe);
+ if (ret) {
+ dev_err(dev, "init clock error\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, afe);
+ pm_runtime_enable(&pdev->dev);
+	if (!pm_runtime_enabled(&pdev->dev)) {
+		ret = -ENODEV;
+		goto err_pm_disable;
+	}
+
+ ret = snd_soc_register_platform(&pdev->dev, &mtk_afe_pcm_platform);
+ if (ret) {
+ dev_warn(dev, "err_platform\n");
+ goto err_platform;
+ }
+
+ ret = snd_soc_register_component(&pdev->dev,
+ &mt2701_afe_pcm_dai_component,
+ mt2701_afe_pcm_dais,
+ ARRAY_SIZE(mt2701_afe_pcm_dais));
+ if (ret) {
+ dev_warn(dev, "err_dai_component\n");
+ goto err_dai_component;
+ }
+
+	ret = mt2701_afe_runtime_resume(&pdev->dev);
+	if (ret)
+		goto err_dai_component;
+
+ return 0;
+
+err_dai_component:
+ snd_soc_unregister_component(&pdev->dev);
+
+err_platform:
+ snd_soc_unregister_platform(&pdev->dev);
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int mt2701_afe_pcm_dev_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+	if (!pm_runtime_status_suspended(&pdev->dev))
+		mt2701_afe_runtime_suspend(&pdev->dev);
+
+	snd_soc_unregister_component(&pdev->dev);
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id mt2701_afe_pcm_dt_match[] = {
+ { .compatible = "mediatek,mt2701-audio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt2701_afe_pcm_dt_match);
+
+static const struct dev_pm_ops mt2701_afe_pm_ops = {
+ SET_RUNTIME_PM_OPS(mt2701_afe_runtime_suspend,
+ mt2701_afe_runtime_resume, NULL)
+};
+
+static struct platform_driver mt2701_afe_pcm_driver = {
+ .driver = {
+ .name = "mt2701-audio",
+ .of_match_table = mt2701_afe_pcm_dt_match,
+#ifdef CONFIG_PM
+ .pm = &mt2701_afe_pm_ops,
+#endif
+ },
+ .probe = mt2701_afe_pcm_dev_probe,
+ .remove = mt2701_afe_pcm_dev_remove,
+};
+
+module_platform_driver(mt2701_afe_pcm_driver);
+
+MODULE_DESCRIPTION("MediaTek ALSA SoC AFE platform driver for MT2701");
+MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+
--- /dev/null
+/*
+ * mt2701-cs42448.c -- MT2701 CS42448 ALSA SoC machine driver
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ir Lian <ir.lian@mediatek.com>
+ * Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of_gpio.h>
+
+#include "mt2701-afe-common.h"
+
+struct mt2701_cs42448_private {
+ int i2s1_in_mux;
+ int i2s1_in_mux_gpio_sel_1;
+ int i2s1_in_mux_gpio_sel_2;
+};
+
+static const char * const i2sin_mux_switch_text[] = {
+ "ADC_SDOUT2",
+ "ADC_SDOUT3",
+ "I2S_IN_1",
+ "I2S_IN_2",
+};
+
+static const struct soc_enum i2sin_mux_enum =
+ SOC_ENUM_SINGLE_EXT(4, i2sin_mux_switch_text);
+
+static int mt2701_cs42448_i2sin1_mux_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+ struct mt2701_cs42448_private *priv = snd_soc_card_get_drvdata(card);
+
+ ucontrol->value.integer.value[0] = priv->i2s1_in_mux;
+ return 0;
+}
+
+static int mt2701_cs42448_i2sin1_mux_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+ struct mt2701_cs42448_private *priv = snd_soc_card_get_drvdata(card);
+
+ if (ucontrol->value.integer.value[0] == priv->i2s1_in_mux)
+ return 0;
+
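+	/* mux value is a 2-bit address: bit 0 drives sel_1, bit 1 drives sel_2 */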
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_1, 0);
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_2, 0);
+ break;
+ case 1:
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_1, 1);
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_2, 0);
+ break;
+ case 2:
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_1, 0);
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_2, 1);
+ break;
+ case 3:
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_1, 1);
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_2, 1);
+ break;
+	default:
+		dev_warn(card->dev, "%s invalid setting\n", __func__);
+		return -EINVAL;
+	}
+
+ priv->i2s1_in_mux = ucontrol->value.integer.value[0];
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget
+ mt2701_cs42448_asoc_card_dapm_widgets[] = {
+ SND_SOC_DAPM_LINE("Line Out Jack", NULL),
+ SND_SOC_DAPM_MIC("AMIC", NULL),
+ SND_SOC_DAPM_LINE("Tuner In", NULL),
+ SND_SOC_DAPM_LINE("Satellite Tuner In", NULL),
+ SND_SOC_DAPM_LINE("AUX In", NULL),
+};
+
+static const struct snd_kcontrol_new mt2701_cs42448_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Line Out Jack"),
+ SOC_DAPM_PIN_SWITCH("AMIC"),
+ SOC_DAPM_PIN_SWITCH("Tuner In"),
+ SOC_DAPM_PIN_SWITCH("Satellite Tuner In"),
+ SOC_DAPM_PIN_SWITCH("AUX In"),
+ SOC_ENUM_EXT("I2SIN1_MUX_Switch", i2sin_mux_enum,
+ mt2701_cs42448_i2sin1_mux_get,
+ mt2701_cs42448_i2sin1_mux_set),
+};
+
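+/* the CS42448 links on this board run at a fixed 48 kHz front-end rate */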
+static const unsigned int mt2701_cs42448_sampling_rates[] = {48000};
+
+static struct snd_pcm_hw_constraint_list mt2701_cs42448_constraints_rates = {
+ .count = ARRAY_SIZE(mt2701_cs42448_sampling_rates),
+ .list = mt2701_cs42448_sampling_rates,
+ .mask = 0,
+};
+
+static int mt2701_cs42448_fe_ops_startup(struct snd_pcm_substream *substream)
+{
+ int err;
+
+ err = snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &mt2701_cs42448_constraints_rates);
+ if (err < 0) {
+ dev_err(substream->pcm->card->dev,
+ "%s snd_pcm_hw_constraint_list failed: 0x%x\n",
+ __func__, err);
+ return err;
+ }
+ return 0;
+}
+
+static struct snd_soc_ops mt2701_cs42448_48k_fe_ops = {
+ .startup = mt2701_cs42448_fe_ops_startup,
+};
+
+static int mt2701_cs42448_be_ops_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ unsigned int mclk_rate;
+ unsigned int rate = params_rate(params);
+ unsigned int div_mclk_over_bck = rate > 192000 ? 2 : 4;
+ unsigned int div_bck_over_lrck = 64;
+
+ mclk_rate = rate * div_bck_over_lrck * div_mclk_over_bck;
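+	/* e.g. 48 kHz: 48000 * 64 * 4 = 12.288 MHz MCLK (256fs) */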
+
+ /* mt2701 mclk */
+ snd_soc_dai_set_sysclk(cpu_dai, 0, mclk_rate, SND_SOC_CLOCK_OUT);
+
+ /* codec mclk */
+ snd_soc_dai_set_sysclk(codec_dai, 0, mclk_rate, SND_SOC_CLOCK_IN);
+
+ return 0;
+}
+
+static struct snd_soc_ops mt2701_cs42448_be_ops = {
+ .hw_params = mt2701_cs42448_be_ops_hw_params
+};
+
+enum {
+ DAI_LINK_FE_MULTI_CH_OUT,
+ DAI_LINK_FE_PCM0_IN,
+ DAI_LINK_FE_PCM1_IN,
+ DAI_LINK_FE_BT_OUT,
+ DAI_LINK_FE_BT_IN,
+ DAI_LINK_BE_I2S0,
+ DAI_LINK_BE_I2S1,
+ DAI_LINK_BE_I2S2,
+ DAI_LINK_BE_I2S3,
+ DAI_LINK_BE_MRG_BT,
+};
+
+static struct snd_soc_dai_link mt2701_cs42448_dai_links[] = {
+ /* FE */
+ [DAI_LINK_FE_MULTI_CH_OUT] = {
+ .name = "mt2701-cs42448-multi-ch-out",
+ .stream_name = "mt2701-cs42448-multi-ch-out",
+ .cpu_dai_name = "PCM_multi",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ops = &mt2701_cs42448_48k_fe_ops,
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ },
+ [DAI_LINK_FE_PCM0_IN] = {
+ .name = "mt2701-cs42448-pcm0",
+ .stream_name = "mt2701-cs42448-pcm0-data-UL",
+ .cpu_dai_name = "PCM0",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ops = &mt2701_cs42448_48k_fe_ops,
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_FE_PCM1_IN] = {
+ .name = "mt2701-cs42448-pcm1-data-UL",
+ .stream_name = "mt2701-cs42448-pcm1-data-UL",
+ .cpu_dai_name = "PCM1",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ops = &mt2701_cs42448_48k_fe_ops,
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_FE_BT_OUT] = {
+ .name = "mt2701-cs42448-pcm-BT-out",
+ .stream_name = "mt2701-cs42448-pcm-BT",
+ .cpu_dai_name = "PCM_BT_DL",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ },
+ [DAI_LINK_FE_BT_IN] = {
+ .name = "mt2701-cs42448-pcm-BT-in",
+ .stream_name = "mt2701-cs42448-pcm-BT",
+ .cpu_dai_name = "PCM_BT_UL",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ },
+ /* BE */
+ [DAI_LINK_BE_I2S0] = {
+ .name = "mt2701-cs42448-I2S0",
+ .cpu_dai_name = "I2S0",
+ .no_pcm = 1,
+ .codec_dai_name = "cs42448",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS
+ | SND_SOC_DAIFMT_GATED,
+ .ops = &mt2701_cs42448_be_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_BE_I2S1] = {
+ .name = "mt2701-cs42448-I2S1",
+ .cpu_dai_name = "I2S1",
+ .no_pcm = 1,
+ .codec_dai_name = "cs42448",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS
+ | SND_SOC_DAIFMT_GATED,
+ .ops = &mt2701_cs42448_be_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_BE_I2S2] = {
+ .name = "mt2701-cs42448-I2S2",
+ .cpu_dai_name = "I2S2",
+ .no_pcm = 1,
+ .codec_dai_name = "cs42448",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS
+ | SND_SOC_DAIFMT_GATED,
+ .ops = &mt2701_cs42448_be_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_BE_I2S3] = {
+ .name = "mt2701-cs42448-I2S3",
+ .cpu_dai_name = "I2S3",
+ .no_pcm = 1,
+ .codec_dai_name = "cs42448",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS
+ | SND_SOC_DAIFMT_GATED,
+ .ops = &mt2701_cs42448_be_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_BE_MRG_BT] = {
+ .name = "mt2701-cs42448-MRG-BT",
+ .cpu_dai_name = "MRG BT",
+ .no_pcm = 1,
+ .codec_dai_name = "bt-sco-pcm-wb",
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+};
+
+static struct snd_soc_card mt2701_cs42448_soc_card = {
+ .name = "mt2701-cs42448",
+ .owner = THIS_MODULE,
+ .dai_link = mt2701_cs42448_dai_links,
+ .num_links = ARRAY_SIZE(mt2701_cs42448_dai_links),
+ .controls = mt2701_cs42448_controls,
+ .num_controls = ARRAY_SIZE(mt2701_cs42448_controls),
+ .dapm_widgets = mt2701_cs42448_asoc_card_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt2701_cs42448_asoc_card_dapm_widgets),
+};
+
+static int mt2701_cs42448_machine_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &mt2701_cs42448_soc_card;
+ int ret;
+ int i;
+ struct device_node *platform_node, *codec_node, *codec_node_bt_mrg;
+ struct mt2701_cs42448_private *priv =
+ devm_kzalloc(&pdev->dev, sizeof(struct mt2701_cs42448_private),
+ GFP_KERNEL);
+ struct device *dev = &pdev->dev;
+
+ if (!priv)
+ return -ENOMEM;
+
+ platform_node = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,platform", 0);
+ if (!platform_node) {
+		dev_err(&pdev->dev,
+			"Property 'mediatek,platform' missing or invalid\n");
+ return -EINVAL;
+ }
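+	/* links without an explicit platform name fall back to the AFE node */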
+ for (i = 0; i < card->num_links; i++) {
+ if (mt2701_cs42448_dai_links[i].platform_name)
+ continue;
+ mt2701_cs42448_dai_links[i].platform_of_node = platform_node;
+ }
+
+ card->dev = dev;
+
+ codec_node = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,audio-codec", 0);
+ if (!codec_node) {
+		dev_err(&pdev->dev,
+			"Property 'mediatek,audio-codec' missing or invalid\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < card->num_links; i++) {
+ if (mt2701_cs42448_dai_links[i].codec_name)
+ continue;
+ mt2701_cs42448_dai_links[i].codec_of_node = codec_node;
+ }
+
+ codec_node_bt_mrg = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,audio-codec-bt-mrg", 0);
+ if (!codec_node_bt_mrg) {
+		dev_err(&pdev->dev,
+			"Property 'mediatek,audio-codec-bt-mrg' missing or invalid\n");
+ return -EINVAL;
+ }
+ mt2701_cs42448_dai_links[DAI_LINK_BE_MRG_BT].codec_of_node
+ = codec_node_bt_mrg;
+
+ ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse audio-routing: %d\n", ret);
+ return ret;
+ }
+
+ priv->i2s1_in_mux_gpio_sel_1 =
+ of_get_named_gpio(dev->of_node, "i2s1-in-sel-gpio1", 0);
+ if (gpio_is_valid(priv->i2s1_in_mux_gpio_sel_1)) {
+ ret = devm_gpio_request(dev, priv->i2s1_in_mux_gpio_sel_1,
+ "i2s1_in_mux_gpio_sel_1");
+		if (ret)
+			dev_warn(&pdev->dev, "%s devm_gpio_request fail %d\n",
+				 __func__, ret);
+		else
+			gpio_direction_output(priv->i2s1_in_mux_gpio_sel_1, 0);
+ }
+
+ priv->i2s1_in_mux_gpio_sel_2 =
+ of_get_named_gpio(dev->of_node, "i2s1-in-sel-gpio2", 0);
+ if (gpio_is_valid(priv->i2s1_in_mux_gpio_sel_2)) {
+ ret = devm_gpio_request(dev, priv->i2s1_in_mux_gpio_sel_2,
+ "i2s1_in_mux_gpio_sel_2");
+		if (ret)
+			dev_warn(&pdev->dev, "%s devm_gpio_request fail2 %d\n",
+				 __func__, ret);
+		else
+			gpio_direction_output(priv->i2s1_in_mux_gpio_sel_2, 0);
+ }
+ snd_soc_card_set_drvdata(card, priv);
+
+	ret = devm_snd_soc_register_card(&pdev->dev, card);
+	if (ret)
+		dev_err(&pdev->dev, "%s devm_snd_soc_register_card fail %d\n",
+			__func__, ret);
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mt2701_cs42448_machine_dt_match[] = {
+ {.compatible = "mediatek,mt2701-cs42448-machine",},
+ {}
+};
+#endif
+
+static struct platform_driver mt2701_cs42448_machine = {
+ .driver = {
+ .name = "mt2701-cs42448",
+#ifdef CONFIG_OF
+		.of_match_table = mt2701_cs42448_machine_dt_match,
+#endif
+ },
+ .probe = mt2701_cs42448_machine_probe,
+};
+
+module_platform_driver(mt2701_cs42448_machine);
+
+/* Module information */
+MODULE_DESCRIPTION("MT2701 CS42448 ALSA SoC machine driver");
+MODULE_AUTHOR("Ir Lian <ir.lian@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mt2701-cs42448");
--- /dev/null
+/*
+ * mt2701-reg.h -- MediaTek MT2701 audio driver register definitions
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT2701_REG_H_
+#define _MT2701_REG_H_
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc.h>
+#include "mt2701-afe-common.h"
+
+/*****************************************************************************
+ * R E G I S T E R D E F I N I T I O N
+ *****************************************************************************/
+#define AUDIO_TOP_CON0 0x0000
+#define AUDIO_TOP_CON4 0x0010
+#define AUDIO_TOP_CON5 0x0014
+#define AFE_DAIBT_CON0 0x001c
+#define AFE_MRGIF_CON 0x003c
+#define ASMI_TIMING_CON1 0x0100
+#define ASMO_TIMING_CON1 0x0104
+#define PWR1_ASM_CON1 0x0108
+#define ASYS_TOP_CON 0x0600
+#define ASYS_I2SIN1_CON 0x0604
+#define ASYS_I2SIN2_CON 0x0608
+#define ASYS_I2SIN3_CON 0x060c
+#define ASYS_I2SIN4_CON 0x0610
+#define ASYS_I2SIN5_CON 0x0614
+#define ASYS_I2SO1_CON 0x061C
+#define ASYS_I2SO2_CON 0x0620
+#define ASYS_I2SO3_CON 0x0624
+#define ASYS_I2SO4_CON 0x0628
+#define ASYS_I2SO5_CON 0x062c
+#define PWR2_TOP_CON 0x0634
+#define AFE_CONN0 0x06c0
+#define AFE_CONN1 0x06c4
+#define AFE_CONN2 0x06c8
+#define AFE_CONN3 0x06cc
+#define AFE_CONN14 0x06f8
+#define AFE_CONN15 0x06fc
+#define AFE_CONN16 0x0700
+#define AFE_CONN17 0x0704
+#define AFE_CONN18 0x0708
+#define AFE_CONN19 0x070c
+#define AFE_CONN20 0x0710
+#define AFE_CONN21 0x0714
+#define AFE_CONN22 0x0718
+#define AFE_CONN23 0x071c
+#define AFE_CONN24 0x0720
+#define AFE_CONN41 0x0764
+#define ASYS_IRQ1_CON 0x0780
+#define ASYS_IRQ2_CON 0x0784
+#define ASYS_IRQ3_CON 0x0788
+#define ASYS_IRQ_CLR 0x07c0
+#define ASYS_IRQ_STATUS 0x07c4
+#define PWR2_ASM_CON1 0x1070
+#define AFE_DAC_CON0 0x1200
+#define AFE_DAC_CON1 0x1204
+#define AFE_DAC_CON2 0x1208
+#define AFE_DAC_CON3 0x120c
+#define AFE_DAC_CON4 0x1210
+#define AFE_MEMIF_HD_CON1 0x121c
+#define AFE_MEMIF_PBUF_SIZE 0x1238
+#define AFE_MEMIF_HD_CON0 0x123c
+#define AFE_DL1_BASE 0x1240
+#define AFE_DL1_CUR 0x1244
+#define AFE_DL2_BASE 0x1250
+#define AFE_DL2_CUR 0x1254
+#define AFE_DL3_BASE 0x1260
+#define AFE_DL3_CUR 0x1264
+#define AFE_DL4_BASE 0x1270
+#define AFE_DL4_CUR 0x1274
+#define AFE_DL5_BASE 0x1280
+#define AFE_DL5_CUR 0x1284
+#define AFE_DLMCH_BASE 0x12a0
+#define AFE_DLMCH_CUR 0x12a4
+#define AFE_ARB1_BASE 0x12b0
+#define AFE_ARB1_CUR 0x12b4
+#define AFE_VUL_BASE 0x1300
+#define AFE_VUL_CUR 0x130c
+#define AFE_UL2_BASE 0x1310
+#define AFE_UL2_END 0x1318
+#define AFE_UL2_CUR 0x131c
+#define AFE_UL3_BASE 0x1320
+#define AFE_UL3_END 0x1328
+#define AFE_UL3_CUR 0x132c
+#define AFE_UL4_BASE 0x1330
+#define AFE_UL4_END 0x1338
+#define AFE_UL4_CUR 0x133c
+#define AFE_UL5_BASE 0x1340
+#define AFE_UL5_END 0x1348
+#define AFE_UL5_CUR 0x134c
+#define AFE_DAI_BASE 0x1370
+#define AFE_DAI_CUR 0x137c
+
+/* AUDIO_TOP_CON0 (0x0000) */
+#define AUDIO_TOP_CON0_A1SYS_A2SYS_ON (0x3 << 0)
+#define AUDIO_TOP_CON0_PDN_AFE (0x1 << 2)
+#define AUDIO_TOP_CON0_PDN_APLL_CK (0x1 << 23)
+
+/* AUDIO_TOP_CON4 (0x0010) */
+#define AUDIO_TOP_CON4_I2SO1_PWN (0x1 << 6)
+#define AUDIO_TOP_CON4_PDN_A1SYS (0x1 << 21)
+#define AUDIO_TOP_CON4_PDN_A2SYS (0x1 << 22)
+#define AUDIO_TOP_CON4_PDN_AFE_CONN (0x1 << 23)
+#define AUDIO_TOP_CON4_PDN_MRGIF (0x1 << 25)
+
+/* AFE_DAIBT_CON0 (0x001c) */
+#define AFE_DAIBT_CON0_DAIBT_EN (0x1 << 0)
+#define AFE_DAIBT_CON0_BT_FUNC_EN (0x1 << 1)
+#define AFE_DAIBT_CON0_BT_FUNC_RDY (0x1 << 3)
+#define AFE_DAIBT_CON0_BT_WIDE_MODE_EN (0x1 << 9)
+#define AFE_DAIBT_CON0_MRG_USE (0x1 << 12)
+
+/* PWR1_ASM_CON1 (0x0108) */
+#define PWR1_ASM_CON1_INIT_VAL (0x492)
+
+/* AFE_MRGIF_CON (0x003c) */
+#define AFE_MRGIF_CON_MRG_EN (0x1 << 0)
+#define AFE_MRGIF_CON_MRG_I2S_EN (0x1 << 16)
+#define AFE_MRGIF_CON_I2S_MODE_MASK (0xf << 20)
+#define AFE_MRGIF_CON_I2S_MODE_32K (0x4 << 20)
+
+/* ASYS_I2SO1_CON (0x061c) */
+#define ASYS_I2SO1_CON_FS (0x1f << 8)
+#define ASYS_I2SO1_CON_FS_SET(x) ((x) << 8)
+#define ASYS_I2SO1_CON_MULTI_CH (0x1 << 16)
+#define ASYS_I2SO1_CON_SIDEGEN (0x1 << 30)
+#define ASYS_I2SO1_CON_I2S_EN (0x1 << 0)
+/* 0:EIAJ 1:I2S */
+#define ASYS_I2SO1_CON_I2S_MODE (0x1 << 3)
+#define ASYS_I2SO1_CON_WIDE_MODE (0x1 << 1)
+#define ASYS_I2SO1_CON_WIDE_MODE_SET(x) ((x) << 1)
+
+/* PWR2_TOP_CON (0x0634) */
+#define PWR2_TOP_CON_INIT_VAL (0xffe1ffff)
+
+/* ASYS_IRQ_CLR (0x07c0) */
+#define ASYS_IRQ_CLR_ALL (0xffffffff)
+
+/* PWR2_ASM_CON1 (0x1070) */
+#define PWR2_ASM_CON1_INIT_VAL (0x492492)
+
+/* AFE_DAC_CON0 (0x1200) */
+#define AFE_DAC_CON0_AFE_ON (0x1 << 0)
+
+/* AFE_MEMIF_PBUF_SIZE (0x1238) */
+#define AFE_MEMIF_PBUF_SIZE_DLM_MASK (0x1 << 29)
+#define AFE_MEMIF_PBUF_SIZE_PAIR_INTERLEAVE (0x0 << 29)
+#define AFE_MEMIF_PBUF_SIZE_FULL_INTERLEAVE (0x1 << 29)
+#define DLMCH_BIT_WIDTH_MASK (0x1 << 28)
+#define AFE_MEMIF_PBUF_SIZE_DLM_CH_MASK (0xf << 24)
+#define AFE_MEMIF_PBUF_SIZE_DLM_CH(x) ((x) << 24)
+#define AFE_MEMIF_PBUF_SIZE_DLM_BYTE_MASK (0x3 << 12)
+#define AFE_MEMIF_PBUF_SIZE_DLM_32BYTES (0x1 << 12)
+
+/* I2S in/out register bit control */
+#define ASYS_I2S_CON_FS (0x1f << 8)
+#define ASYS_I2S_CON_FS_SET(x) ((x) << 8)
+#define ASYS_I2S_CON_RESET (0x1 << 30)
+#define ASYS_I2S_CON_I2S_EN (0x1 << 0)
+#define ASYS_I2S_CON_I2S_COUPLE_MODE (0x1 << 17)
+/* 0:EIAJ 1:I2S */
+#define ASYS_I2S_CON_I2S_MODE (0x1 << 3)
+#define ASYS_I2S_CON_WIDE_MODE (0x1 << 1)
+#define ASYS_I2S_CON_WIDE_MODE_SET(x) ((x) << 1)
+#define ASYS_I2S_IN_PHASE_FIX (0x1 << 31)
+
+#define AFE_END_ADDR 0x15e0
+#endif /* _MT2701_REG_H_ */
+++ /dev/null
-/*
- * mt8173-max98090.c -- MT8173 MAX98090 ALSA SoC machine driver
- *
- * Copyright (c) 2015 MediaTek Inc.
- * Author: Koro Chen <koro.chen@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include <linux/gpio.h>
-#include "../codecs/max98090.h"
-
-static struct snd_soc_jack mt8173_max98090_jack;
-
-static struct snd_soc_jack_pin mt8173_max98090_jack_pins[] = {
- {
- .pin = "Headphone",
- .mask = SND_JACK_HEADPHONE,
- },
- {
- .pin = "Headset Mic",
- .mask = SND_JACK_MICROPHONE,
- },
-};
-
-static const struct snd_soc_dapm_widget mt8173_max98090_widgets[] = {
- SND_SOC_DAPM_SPK("Speaker", NULL),
- SND_SOC_DAPM_MIC("Int Mic", NULL),
- SND_SOC_DAPM_HP("Headphone", NULL),
- SND_SOC_DAPM_MIC("Headset Mic", NULL),
-};
-
-static const struct snd_soc_dapm_route mt8173_max98090_routes[] = {
- {"Speaker", NULL, "SPKL"},
- {"Speaker", NULL, "SPKR"},
- {"DMICL", NULL, "Int Mic"},
- {"Headphone", NULL, "HPL"},
- {"Headphone", NULL, "HPR"},
- {"Headset Mic", NULL, "MICBIAS"},
- {"IN34", NULL, "Headset Mic"},
-};
-
-static const struct snd_kcontrol_new mt8173_max98090_controls[] = {
- SOC_DAPM_PIN_SWITCH("Speaker"),
- SOC_DAPM_PIN_SWITCH("Int Mic"),
- SOC_DAPM_PIN_SWITCH("Headphone"),
- SOC_DAPM_PIN_SWITCH("Headset Mic"),
-};
-
-static int mt8173_max98090_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
-
- return snd_soc_dai_set_sysclk(codec_dai, 0, params_rate(params) * 256,
- SND_SOC_CLOCK_IN);
-}
-
-static struct snd_soc_ops mt8173_max98090_ops = {
- .hw_params = mt8173_max98090_hw_params,
-};
-
-static int mt8173_max98090_init(struct snd_soc_pcm_runtime *runtime)
-{
- int ret;
- struct snd_soc_card *card = runtime->card;
- struct snd_soc_codec *codec = runtime->codec;
-
- /* enable jack detection */
- ret = snd_soc_card_jack_new(card, "Headphone", SND_JACK_HEADPHONE,
- &mt8173_max98090_jack, NULL, 0);
- if (ret) {
- dev_err(card->dev, "Can't snd_soc_jack_new %d\n", ret);
- return ret;
- }
-
- ret = snd_soc_jack_add_pins(&mt8173_max98090_jack,
- ARRAY_SIZE(mt8173_max98090_jack_pins),
- mt8173_max98090_jack_pins);
- if (ret) {
- dev_err(card->dev, "Can't snd_soc_jack_add_pins %d\n", ret);
- return ret;
- }
-
- return max98090_mic_detect(codec, &mt8173_max98090_jack);
-}
-
-/* Digital audio interface glue - connects codec <---> CPU */
-static struct snd_soc_dai_link mt8173_max98090_dais[] = {
- /* Front End DAI links */
- {
- .name = "MAX98090 Playback",
- .stream_name = "MAX98090 Playback",
- .cpu_dai_name = "DL1",
- .codec_name = "snd-soc-dummy",
- .codec_dai_name = "snd-soc-dummy-dai",
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dynamic = 1,
- .dpcm_playback = 1,
- },
- {
- .name = "MAX98090 Capture",
- .stream_name = "MAX98090 Capture",
- .cpu_dai_name = "VUL",
- .codec_name = "snd-soc-dummy",
- .codec_dai_name = "snd-soc-dummy-dai",
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dynamic = 1,
- .dpcm_capture = 1,
- },
- /* Back End DAI links */
- {
- .name = "Codec",
- .cpu_dai_name = "I2S",
- .no_pcm = 1,
- .codec_dai_name = "HiFi",
- .init = mt8173_max98090_init,
- .ops = &mt8173_max98090_ops,
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS,
- .dpcm_playback = 1,
- .dpcm_capture = 1,
- },
-};
-
-static struct snd_soc_card mt8173_max98090_card = {
- .name = "mt8173-max98090",
- .owner = THIS_MODULE,
- .dai_link = mt8173_max98090_dais,
- .num_links = ARRAY_SIZE(mt8173_max98090_dais),
- .controls = mt8173_max98090_controls,
- .num_controls = ARRAY_SIZE(mt8173_max98090_controls),
- .dapm_widgets = mt8173_max98090_widgets,
- .num_dapm_widgets = ARRAY_SIZE(mt8173_max98090_widgets),
- .dapm_routes = mt8173_max98090_routes,
- .num_dapm_routes = ARRAY_SIZE(mt8173_max98090_routes),
-};
-
-static int mt8173_max98090_dev_probe(struct platform_device *pdev)
-{
- struct snd_soc_card *card = &mt8173_max98090_card;
- struct device_node *codec_node, *platform_node;
- int ret, i;
-
- platform_node = of_parse_phandle(pdev->dev.of_node,
- "mediatek,platform", 0);
- if (!platform_node) {
- dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
- return -EINVAL;
- }
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_max98090_dais[i].platform_name)
- continue;
- mt8173_max98090_dais[i].platform_of_node = platform_node;
- }
-
- codec_node = of_parse_phandle(pdev->dev.of_node,
- "mediatek,audio-codec", 0);
- if (!codec_node) {
- dev_err(&pdev->dev,
- "Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
- }
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_max98090_dais[i].codec_name)
- continue;
- mt8173_max98090_dais[i].codec_of_node = codec_node;
- }
- card->dev = &pdev->dev;
-
- ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
- dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
- __func__, ret);
- return ret;
-}
-
-static const struct of_device_id mt8173_max98090_dt_match[] = {
- { .compatible = "mediatek,mt8173-max98090", },
- { }
-};
-MODULE_DEVICE_TABLE(of, mt8173_max98090_dt_match);
-
-static struct platform_driver mt8173_max98090_driver = {
- .driver = {
- .name = "mt8173-max98090",
- .of_match_table = mt8173_max98090_dt_match,
-#ifdef CONFIG_PM
- .pm = &snd_soc_pm_ops,
-#endif
- },
- .probe = mt8173_max98090_dev_probe,
-};
-
-module_platform_driver(mt8173_max98090_driver);
-
-/* Module information */
-MODULE_DESCRIPTION("MT8173 MAX98090 ALSA SoC machine driver");
-MODULE_AUTHOR("Koro Chen <koro.chen@mediatek.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:mt8173-max98090");
-
+++ /dev/null
-/*
- * mt8173-rt5650-rt5514.c -- MT8173 machine driver with RT5650/5514 codecs
- *
- * Copyright (c) 2016 MediaTek Inc.
- * Author: Koro Chen <koro.chen@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include "../codecs/rt5645.h"
-
-#define MCLK_FOR_CODECS 12288000
-
-static const struct snd_soc_dapm_widget mt8173_rt5650_rt5514_widgets[] = {
- SND_SOC_DAPM_SPK("Speaker", NULL),
- SND_SOC_DAPM_MIC("Int Mic", NULL),
- SND_SOC_DAPM_HP("Headphone", NULL),
- SND_SOC_DAPM_MIC("Headset Mic", NULL),
-};
-
-static const struct snd_soc_dapm_route mt8173_rt5650_rt5514_routes[] = {
- {"Speaker", NULL, "SPOL"},
- {"Speaker", NULL, "SPOR"},
- {"Sub DMIC1L", NULL, "Int Mic"},
- {"Sub DMIC1R", NULL, "Int Mic"},
- {"Headphone", NULL, "HPOL"},
- {"Headphone", NULL, "HPOR"},
- {"Headset Mic", NULL, "micbias1"},
- {"Headset Mic", NULL, "micbias2"},
- {"IN1P", NULL, "Headset Mic"},
- {"IN1N", NULL, "Headset Mic"},
-};
-
-static const struct snd_kcontrol_new mt8173_rt5650_rt5514_controls[] = {
- SOC_DAPM_PIN_SWITCH("Speaker"),
- SOC_DAPM_PIN_SWITCH("Int Mic"),
- SOC_DAPM_PIN_SWITCH("Headphone"),
- SOC_DAPM_PIN_SWITCH("Headset Mic"),
-};
-
-static int mt8173_rt5650_rt5514_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- int i, ret;
-
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
-
- /* pll from mclk 12.288M */
- ret = snd_soc_dai_set_pll(codec_dai, 0, 0, MCLK_FOR_CODECS,
- params_rate(params) * 512);
- if (ret)
- return ret;
-
- /* sysclk from pll */
- ret = snd_soc_dai_set_sysclk(codec_dai, 1,
- params_rate(params) * 512,
- SND_SOC_CLOCK_IN);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static struct snd_soc_ops mt8173_rt5650_rt5514_ops = {
- .hw_params = mt8173_rt5650_rt5514_hw_params,
-};
-
-static struct snd_soc_jack mt8173_rt5650_rt5514_jack;
-
-static int mt8173_rt5650_rt5514_init(struct snd_soc_pcm_runtime *runtime)
-{
- struct snd_soc_card *card = runtime->card;
- struct snd_soc_codec *codec = runtime->codec_dais[0]->codec;
- int ret;
-
- rt5645_sel_asrc_clk_src(codec,
- RT5645_DA_STEREO_FILTER |
- RT5645_AD_STEREO_FILTER,
- RT5645_CLK_SEL_I2S1_ASRC);
-
- /* enable jack detection */
- ret = snd_soc_card_jack_new(card, "Headset Jack",
- SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3,
- &mt8173_rt5650_rt5514_jack, NULL, 0);
- if (ret) {
- dev_err(card->dev, "Can't new Headset Jack %d\n", ret);
- return ret;
- }
-
- return rt5645_set_jack_detect(codec,
- &mt8173_rt5650_rt5514_jack,
- &mt8173_rt5650_rt5514_jack,
- &mt8173_rt5650_rt5514_jack);
-}
-
-static struct snd_soc_dai_link_component mt8173_rt5650_rt5514_codecs[] = {
- {
- .dai_name = "rt5645-aif1",
- },
- {
- .dai_name = "rt5514-aif1",
- },
-};
-
-enum {
- DAI_LINK_PLAYBACK,
- DAI_LINK_CAPTURE,
- DAI_LINK_CODEC_I2S,
-};
-
-/* Digital audio interface glue - connects codec <---> CPU */
-static struct snd_soc_dai_link mt8173_rt5650_rt5514_dais[] = {
- /* Front End DAI links */
- [DAI_LINK_PLAYBACK] = {
- .name = "rt5650_rt5514 Playback",
- .stream_name = "rt5650_rt5514 Playback",
- .cpu_dai_name = "DL1",
- .codec_name = "snd-soc-dummy",
- .codec_dai_name = "snd-soc-dummy-dai",
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dynamic = 1,
- .dpcm_playback = 1,
- },
- [DAI_LINK_CAPTURE] = {
- .name = "rt5650_rt5514 Capture",
- .stream_name = "rt5650_rt5514 Capture",
- .cpu_dai_name = "VUL",
- .codec_name = "snd-soc-dummy",
- .codec_dai_name = "snd-soc-dummy-dai",
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dynamic = 1,
- .dpcm_capture = 1,
- },
- /* Back End DAI links */
- [DAI_LINK_CODEC_I2S] = {
- .name = "Codec",
- .cpu_dai_name = "I2S",
- .no_pcm = 1,
- .codecs = mt8173_rt5650_rt5514_codecs,
- .num_codecs = 2,
- .init = mt8173_rt5650_rt5514_init,
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS,
- .ops = &mt8173_rt5650_rt5514_ops,
- .ignore_pmdown_time = 1,
- .dpcm_playback = 1,
- .dpcm_capture = 1,
- },
-};
-
-static struct snd_soc_codec_conf mt8173_rt5650_rt5514_codec_conf[] = {
- {
- .name_prefix = "Sub",
- },
-};
-
-static struct snd_soc_card mt8173_rt5650_rt5514_card = {
- .name = "mtk-rt5650-rt5514",
- .owner = THIS_MODULE,
- .dai_link = mt8173_rt5650_rt5514_dais,
- .num_links = ARRAY_SIZE(mt8173_rt5650_rt5514_dais),
- .codec_conf = mt8173_rt5650_rt5514_codec_conf,
- .num_configs = ARRAY_SIZE(mt8173_rt5650_rt5514_codec_conf),
- .controls = mt8173_rt5650_rt5514_controls,
- .num_controls = ARRAY_SIZE(mt8173_rt5650_rt5514_controls),
- .dapm_widgets = mt8173_rt5650_rt5514_widgets,
- .num_dapm_widgets = ARRAY_SIZE(mt8173_rt5650_rt5514_widgets),
- .dapm_routes = mt8173_rt5650_rt5514_routes,
- .num_dapm_routes = ARRAY_SIZE(mt8173_rt5650_rt5514_routes),
-};
-
-static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
-{
- struct snd_soc_card *card = &mt8173_rt5650_rt5514_card;
- struct device_node *platform_node;
- int i, ret;
-
- platform_node = of_parse_phandle(pdev->dev.of_node,
- "mediatek,platform", 0);
- if (!platform_node) {
- dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
- return -EINVAL;
- }
-
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_rt5650_rt5514_dais[i].platform_name)
- continue;
- mt8173_rt5650_rt5514_dais[i].platform_of_node = platform_node;
- }
-
- mt8173_rt5650_rt5514_codecs[0].of_node =
- of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
- if (!mt8173_rt5650_rt5514_codecs[0].of_node) {
- dev_err(&pdev->dev,
- "Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
- }
- mt8173_rt5650_rt5514_codecs[1].of_node =
- of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1);
- if (!mt8173_rt5650_rt5514_codecs[1].of_node) {
- dev_err(&pdev->dev,
- "Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
- }
- mt8173_rt5650_rt5514_codec_conf[0].of_node =
- mt8173_rt5650_rt5514_codecs[1].of_node;
-
- card->dev = &pdev->dev;
- platform_set_drvdata(pdev, card);
-
- ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
- dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
- __func__, ret);
- return ret;
-}
-
-static const struct of_device_id mt8173_rt5650_rt5514_dt_match[] = {
- { .compatible = "mediatek,mt8173-rt5650-rt5514", },
- { }
-};
-MODULE_DEVICE_TABLE(of, mt8173_rt5650_rt5514_dt_match);
-
-static struct platform_driver mt8173_rt5650_rt5514_driver = {
- .driver = {
- .name = "mtk-rt5650-rt5514",
- .of_match_table = mt8173_rt5650_rt5514_dt_match,
-#ifdef CONFIG_PM
- .pm = &snd_soc_pm_ops,
-#endif
- },
- .probe = mt8173_rt5650_rt5514_dev_probe,
-};
-
-module_platform_driver(mt8173_rt5650_rt5514_driver);
-
-/* Module information */
-MODULE_DESCRIPTION("MT8173 RT5650 and RT5514 SoC machine driver");
-MODULE_AUTHOR("Koro Chen <koro.chen@mediatek.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:mtk-rt5650-rt5514");
-
+++ /dev/null
-/*
- * mt8173-rt5650-rt5676.c -- MT8173 machine driver with RT5650/5676 codecs
- *
- * Copyright (c) 2015 MediaTek Inc.
- * Author: Koro Chen <koro.chen@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include "../codecs/rt5645.h"
-#include "../codecs/rt5677.h"
-
-#define MCLK_FOR_CODECS 12288000
-
-static const struct snd_soc_dapm_widget mt8173_rt5650_rt5676_widgets[] = {
- SND_SOC_DAPM_SPK("Speaker", NULL),
- SND_SOC_DAPM_MIC("Int Mic", NULL),
- SND_SOC_DAPM_HP("Headphone", NULL),
- SND_SOC_DAPM_MIC("Headset Mic", NULL),
-};
-
-static const struct snd_soc_dapm_route mt8173_rt5650_rt5676_routes[] = {
- {"Speaker", NULL, "SPOL"},
- {"Speaker", NULL, "SPOR"},
- {"Speaker", NULL, "Sub AIF2TX"}, /* IF2 ADC to 5650 */
- {"Sub DMIC L1", NULL, "Int Mic"}, /* DMIC from 5676 */
- {"Sub DMIC R1", NULL, "Int Mic"},
- {"Headphone", NULL, "HPOL"},
- {"Headphone", NULL, "HPOR"},
- {"Headphone", NULL, "Sub AIF2TX"}, /* IF2 ADC to 5650 */
- {"Headset Mic", NULL, "micbias1"},
- {"Headset Mic", NULL, "micbias2"},
- {"IN1P", NULL, "Headset Mic"},
- {"IN1N", NULL, "Headset Mic"},
- {"Sub AIF2RX", NULL, "Headset Mic"}, /* IF2 DAC from 5650 */
-};
-
-static const struct snd_kcontrol_new mt8173_rt5650_rt5676_controls[] = {
- SOC_DAPM_PIN_SWITCH("Speaker"),
- SOC_DAPM_PIN_SWITCH("Int Mic"),
- SOC_DAPM_PIN_SWITCH("Headphone"),
- SOC_DAPM_PIN_SWITCH("Headset Mic"),
-};
-
-static int mt8173_rt5650_rt5676_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- int i, ret;
-
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
-
- /* pll from mclk 12.288M */
- ret = snd_soc_dai_set_pll(codec_dai, 0, 0, MCLK_FOR_CODECS,
- params_rate(params) * 512);
- if (ret)
- return ret;
-
- /* sysclk from pll */
- ret = snd_soc_dai_set_sysclk(codec_dai, 1,
- params_rate(params) * 512,
- SND_SOC_CLOCK_IN);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static struct snd_soc_ops mt8173_rt5650_rt5676_ops = {
- .hw_params = mt8173_rt5650_rt5676_hw_params,
-};
-
-static struct snd_soc_jack mt8173_rt5650_rt5676_jack;
-
-static int mt8173_rt5650_rt5676_init(struct snd_soc_pcm_runtime *runtime)
-{
- struct snd_soc_card *card = runtime->card;
- struct snd_soc_codec *codec = runtime->codec_dais[0]->codec;
- struct snd_soc_codec *codec_sub = runtime->codec_dais[1]->codec;
- int ret;
-
- rt5645_sel_asrc_clk_src(codec,
- RT5645_DA_STEREO_FILTER |
- RT5645_AD_STEREO_FILTER,
- RT5645_CLK_SEL_I2S1_ASRC);
- rt5677_sel_asrc_clk_src(codec_sub,
- RT5677_DA_STEREO_FILTER |
- RT5677_AD_STEREO1_FILTER,
- RT5677_CLK_SEL_I2S1_ASRC);
- rt5677_sel_asrc_clk_src(codec_sub,
- RT5677_AD_STEREO2_FILTER |
- RT5677_I2S2_SOURCE,
- RT5677_CLK_SEL_I2S2_ASRC);
-
- /* enable jack detection */
- ret = snd_soc_card_jack_new(card, "Headset Jack",
- SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3,
- &mt8173_rt5650_rt5676_jack, NULL, 0);
- if (ret) {
- dev_err(card->dev, "Can't new Headset Jack %d\n", ret);
- return ret;
- }
-
- return rt5645_set_jack_detect(codec,
- &mt8173_rt5650_rt5676_jack,
- &mt8173_rt5650_rt5676_jack,
- &mt8173_rt5650_rt5676_jack);
-}
-
-static struct snd_soc_dai_link_component mt8173_rt5650_rt5676_codecs[] = {
- {
- .dai_name = "rt5645-aif1",
- },
- {
- .dai_name = "rt5677-aif1",
- },
-};
-
-enum {
- DAI_LINK_PLAYBACK,
- DAI_LINK_CAPTURE,
- DAI_LINK_HDMI,
- DAI_LINK_CODEC_I2S,
- DAI_LINK_HDMI_I2S,
- DAI_LINK_INTERCODEC
-};
-
-/* Digital audio interface glue - connects codec <---> CPU */
-static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
- /* Front End DAI links */
- [DAI_LINK_PLAYBACK] = {
- .name = "rt5650_rt5676 Playback",
- .stream_name = "rt5650_rt5676 Playback",
- .cpu_dai_name = "DL1",
- .codec_name = "snd-soc-dummy",
- .codec_dai_name = "snd-soc-dummy-dai",
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dynamic = 1,
- .dpcm_playback = 1,
- },
- [DAI_LINK_CAPTURE] = {
- .name = "rt5650_rt5676 Capture",
- .stream_name = "rt5650_rt5676 Capture",
- .cpu_dai_name = "VUL",
- .codec_name = "snd-soc-dummy",
- .codec_dai_name = "snd-soc-dummy-dai",
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dynamic = 1,
- .dpcm_capture = 1,
- },
- [DAI_LINK_HDMI] = {
- .name = "HDMI",
- .stream_name = "HDMI PCM",
- .cpu_dai_name = "HDMI",
- .codec_name = "snd-soc-dummy",
- .codec_dai_name = "snd-soc-dummy-dai",
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dynamic = 1,
- .dpcm_playback = 1,
- },
-
- /* Back End DAI links */
- [DAI_LINK_CODEC_I2S] = {
- .name = "Codec",
- .cpu_dai_name = "I2S",
- .no_pcm = 1,
- .codecs = mt8173_rt5650_rt5676_codecs,
- .num_codecs = 2,
- .init = mt8173_rt5650_rt5676_init,
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS,
- .ops = &mt8173_rt5650_rt5676_ops,
- .ignore_pmdown_time = 1,
- .dpcm_playback = 1,
- .dpcm_capture = 1,
- },
- [DAI_LINK_HDMI_I2S] = {
- .name = "HDMI BE",
- .cpu_dai_name = "HDMIO",
- .no_pcm = 1,
- .codec_dai_name = "i2s-hifi",
- .dpcm_playback = 1,
- },
- /* rt5676 <-> rt5650 intercodec link: Sets rt5676 I2S2 as master */
- [DAI_LINK_INTERCODEC] = {
- .name = "rt5650_rt5676 intercodec",
- .stream_name = "rt5650_rt5676 intercodec",
- .cpu_dai_name = "snd-soc-dummy-dai",
- .platform_name = "snd-soc-dummy",
- .no_pcm = 1,
- .codec_dai_name = "rt5677-aif2",
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM,
- },
-
-};
-
-static struct snd_soc_codec_conf mt8173_rt5650_rt5676_codec_conf[] = {
- {
- .name_prefix = "Sub",
- },
-};
-
-static struct snd_soc_card mt8173_rt5650_rt5676_card = {
- .name = "mtk-rt5650-rt5676",
- .owner = THIS_MODULE,
- .dai_link = mt8173_rt5650_rt5676_dais,
- .num_links = ARRAY_SIZE(mt8173_rt5650_rt5676_dais),
- .codec_conf = mt8173_rt5650_rt5676_codec_conf,
- .num_configs = ARRAY_SIZE(mt8173_rt5650_rt5676_codec_conf),
- .controls = mt8173_rt5650_rt5676_controls,
- .num_controls = ARRAY_SIZE(mt8173_rt5650_rt5676_controls),
- .dapm_widgets = mt8173_rt5650_rt5676_widgets,
- .num_dapm_widgets = ARRAY_SIZE(mt8173_rt5650_rt5676_widgets),
- .dapm_routes = mt8173_rt5650_rt5676_routes,
- .num_dapm_routes = ARRAY_SIZE(mt8173_rt5650_rt5676_routes),
-};
-
-static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
-{
- struct snd_soc_card *card = &mt8173_rt5650_rt5676_card;
- struct device_node *platform_node;
- int i, ret;
-
- platform_node = of_parse_phandle(pdev->dev.of_node,
- "mediatek,platform", 0);
- if (!platform_node) {
- dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
- return -EINVAL;
- }
-
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_rt5650_rt5676_dais[i].platform_name)
- continue;
- mt8173_rt5650_rt5676_dais[i].platform_of_node = platform_node;
- }
-
- mt8173_rt5650_rt5676_codecs[0].of_node =
- of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
- if (!mt8173_rt5650_rt5676_codecs[0].of_node) {
- dev_err(&pdev->dev,
- "Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
- }
- mt8173_rt5650_rt5676_codecs[1].of_node =
- of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1);
- if (!mt8173_rt5650_rt5676_codecs[1].of_node) {
- dev_err(&pdev->dev,
- "Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
- }
- mt8173_rt5650_rt5676_codec_conf[0].of_node =
- mt8173_rt5650_rt5676_codecs[1].of_node;
-
- mt8173_rt5650_rt5676_dais[DAI_LINK_INTERCODEC].codec_of_node =
- mt8173_rt5650_rt5676_codecs[1].of_node;
-
- mt8173_rt5650_rt5676_dais[DAI_LINK_HDMI_I2S].codec_of_node =
- of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 2);
- if (!mt8173_rt5650_rt5676_dais[DAI_LINK_HDMI_I2S].codec_of_node) {
- dev_err(&pdev->dev,
- "Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
- }
-
- card->dev = &pdev->dev;
- platform_set_drvdata(pdev, card);
-
- ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
- dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
- __func__, ret);
- return ret;
-}
-
-static const struct of_device_id mt8173_rt5650_rt5676_dt_match[] = {
- { .compatible = "mediatek,mt8173-rt5650-rt5676", },
- { }
-};
-MODULE_DEVICE_TABLE(of, mt8173_rt5650_rt5676_dt_match);
-
-static struct platform_driver mt8173_rt5650_rt5676_driver = {
- .driver = {
- .name = "mtk-rt5650-rt5676",
- .of_match_table = mt8173_rt5650_rt5676_dt_match,
-#ifdef CONFIG_PM
- .pm = &snd_soc_pm_ops,
-#endif
- },
- .probe = mt8173_rt5650_rt5676_dev_probe,
-};
-
-module_platform_driver(mt8173_rt5650_rt5676_driver);
-
-/* Module information */
-MODULE_DESCRIPTION("MT8173 RT5650 and RT5676 SoC machine driver");
-MODULE_AUTHOR("Koro Chen <koro.chen@mediatek.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:mtk-rt5650-rt5676");
-
+++ /dev/null
-/*
- * mt8173-rt5650.c -- MT8173 machine driver with RT5650 codecs
- *
- * Copyright (c) 2016 MediaTek Inc.
- * Author: Koro Chen <koro.chen@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include "../codecs/rt5645.h"
-
-#define MCLK_FOR_CODECS 12288000
-
-static const struct snd_soc_dapm_widget mt8173_rt5650_widgets[] = {
- SND_SOC_DAPM_SPK("Speaker", NULL),
- SND_SOC_DAPM_MIC("Int Mic", NULL),
- SND_SOC_DAPM_HP("Headphone", NULL),
- SND_SOC_DAPM_MIC("Headset Mic", NULL),
-};
-
-static const struct snd_soc_dapm_route mt8173_rt5650_routes[] = {
- {"Speaker", NULL, "SPOL"},
- {"Speaker", NULL, "SPOR"},
- {"DMIC L1", NULL, "Int Mic"},
- {"DMIC R1", NULL, "Int Mic"},
- {"Headphone", NULL, "HPOL"},
- {"Headphone", NULL, "HPOR"},
- {"Headset Mic", NULL, "micbias1"},
- {"Headset Mic", NULL, "micbias2"},
- {"IN1P", NULL, "Headset Mic"},
- {"IN1N", NULL, "Headset Mic"},
-};
-
-static const struct snd_kcontrol_new mt8173_rt5650_controls[] = {
- SOC_DAPM_PIN_SWITCH("Speaker"),
- SOC_DAPM_PIN_SWITCH("Int Mic"),
- SOC_DAPM_PIN_SWITCH("Headphone"),
- SOC_DAPM_PIN_SWITCH("Headset Mic"),
-};
-
-static int mt8173_rt5650_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- int i, ret;
-
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
-
- /* pll from mclk 12.288M */
- ret = snd_soc_dai_set_pll(codec_dai, 0, 0, MCLK_FOR_CODECS,
- params_rate(params) * 512);
- if (ret)
- return ret;
-
- /* sysclk from pll */
- ret = snd_soc_dai_set_sysclk(codec_dai, 1,
- params_rate(params) * 512,
- SND_SOC_CLOCK_IN);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static struct snd_soc_ops mt8173_rt5650_ops = {
- .hw_params = mt8173_rt5650_hw_params,
-};
-
-static struct snd_soc_jack mt8173_rt5650_jack;
-
-static int mt8173_rt5650_init(struct snd_soc_pcm_runtime *runtime)
-{
- struct snd_soc_card *card = runtime->card;
- struct snd_soc_codec *codec = runtime->codec_dais[0]->codec;
- const char *codec_capture_dai = runtime->codec_dais[1]->name;
- int ret;
-
- rt5645_sel_asrc_clk_src(codec,
- RT5645_DA_STEREO_FILTER,
- RT5645_CLK_SEL_I2S1_ASRC);
-
- if (!strcmp(codec_capture_dai, "rt5645-aif1")) {
- rt5645_sel_asrc_clk_src(codec,
- RT5645_AD_STEREO_FILTER,
- RT5645_CLK_SEL_I2S1_ASRC);
- } else if (!strcmp(codec_capture_dai, "rt5645-aif2")) {
- rt5645_sel_asrc_clk_src(codec,
- RT5645_AD_STEREO_FILTER,
- RT5645_CLK_SEL_I2S2_ASRC);
- } else {
- dev_warn(card->dev,
- "Only one dai codec found in DTS, enabled rt5645 AD filter\n");
- rt5645_sel_asrc_clk_src(codec,
- RT5645_AD_STEREO_FILTER,
- RT5645_CLK_SEL_I2S1_ASRC);
- }
-
- /* enable jack detection */
- ret = snd_soc_card_jack_new(card, "Headset Jack",
- SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3,
- &mt8173_rt5650_jack, NULL, 0);
- if (ret) {
- dev_err(card->dev, "Can't new Headset Jack %d\n", ret);
- return ret;
- }
-
- return rt5645_set_jack_detect(codec,
- &mt8173_rt5650_jack,
- &mt8173_rt5650_jack,
- &mt8173_rt5650_jack);
-}
-
-static struct snd_soc_dai_link_component mt8173_rt5650_codecs[] = {
- {
- /* Playback */
- .dai_name = "rt5645-aif1",
- },
- {
- /* Capture */
- .dai_name = "rt5645-aif1",
- },
-};
-
-enum {
- DAI_LINK_PLAYBACK,
- DAI_LINK_CAPTURE,
- DAI_LINK_CODEC_I2S,
-};
-
-/* Digital audio interface glue - connects codec <---> CPU */
-static struct snd_soc_dai_link mt8173_rt5650_dais[] = {
- /* Front End DAI links */
- [DAI_LINK_PLAYBACK] = {
- .name = "rt5650 Playback",
- .stream_name = "rt5650 Playback",
- .cpu_dai_name = "DL1",
- .codec_name = "snd-soc-dummy",
- .codec_dai_name = "snd-soc-dummy-dai",
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dynamic = 1,
- .dpcm_playback = 1,
- },
- [DAI_LINK_CAPTURE] = {
- .name = "rt5650 Capture",
- .stream_name = "rt5650 Capture",
- .cpu_dai_name = "VUL",
- .codec_name = "snd-soc-dummy",
- .codec_dai_name = "snd-soc-dummy-dai",
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dynamic = 1,
- .dpcm_capture = 1,
- },
- /* Back End DAI links */
- [DAI_LINK_CODEC_I2S] = {
- .name = "Codec",
- .cpu_dai_name = "I2S",
- .no_pcm = 1,
- .codecs = mt8173_rt5650_codecs,
- .num_codecs = 2,
- .init = mt8173_rt5650_init,
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS,
- .ops = &mt8173_rt5650_ops,
- .ignore_pmdown_time = 1,
- .dpcm_playback = 1,
- .dpcm_capture = 1,
- },
-};
-
-static struct snd_soc_card mt8173_rt5650_card = {
- .name = "mtk-rt5650",
- .owner = THIS_MODULE,
- .dai_link = mt8173_rt5650_dais,
- .num_links = ARRAY_SIZE(mt8173_rt5650_dais),
- .controls = mt8173_rt5650_controls,
- .num_controls = ARRAY_SIZE(mt8173_rt5650_controls),
- .dapm_widgets = mt8173_rt5650_widgets,
- .num_dapm_widgets = ARRAY_SIZE(mt8173_rt5650_widgets),
- .dapm_routes = mt8173_rt5650_routes,
- .num_dapm_routes = ARRAY_SIZE(mt8173_rt5650_routes),
-};
-
-static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
-{
- struct snd_soc_card *card = &mt8173_rt5650_card;
- struct device_node *platform_node;
- struct device_node *np;
- const char *codec_capture_dai;
- int i, ret;
-
- platform_node = of_parse_phandle(pdev->dev.of_node,
- "mediatek,platform", 0);
- if (!platform_node) {
- dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
- return -EINVAL;
- }
-
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_rt5650_dais[i].platform_name)
- continue;
- mt8173_rt5650_dais[i].platform_of_node = platform_node;
- }
-
- mt8173_rt5650_codecs[0].of_node =
- of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
- if (!mt8173_rt5650_codecs[0].of_node) {
- dev_err(&pdev->dev,
- "Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
- }
- mt8173_rt5650_codecs[1].of_node = mt8173_rt5650_codecs[0].of_node;
-
- if (of_find_node_by_name(platform_node, "codec-capture")) {
- np = of_get_child_by_name(pdev->dev.of_node, "codec-capture");
- if (!np) {
- dev_err(&pdev->dev,
- "%s: Can't find codec-capture DT node\n",
- __func__);
- return -EINVAL;
- }
- ret = snd_soc_of_get_dai_name(np, &codec_capture_dai);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "%s codec_capture_dai name fail %d\n",
- __func__, ret);
- return ret;
- }
- mt8173_rt5650_codecs[1].dai_name = codec_capture_dai;
- }
-
- card->dev = &pdev->dev;
- platform_set_drvdata(pdev, card);
-
- ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
- dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
- __func__, ret);
- return ret;
-}
-
-static const struct of_device_id mt8173_rt5650_dt_match[] = {
- { .compatible = "mediatek,mt8173-rt5650", },
- { }
-};
-MODULE_DEVICE_TABLE(of, mt8173_rt5650_dt_match);
-
-static struct platform_driver mt8173_rt5650_driver = {
- .driver = {
- .name = "mtk-rt5650",
- .of_match_table = mt8173_rt5650_dt_match,
-#ifdef CONFIG_PM
- .pm = &snd_soc_pm_ops,
-#endif
- },
- .probe = mt8173_rt5650_dev_probe,
-};
-
-module_platform_driver(mt8173_rt5650_driver);
-
-/* Module information */
-MODULE_DESCRIPTION("MT8173 RT5650 SoC machine driver");
-MODULE_AUTHOR("Koro Chen <koro.chen@mediatek.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:mtk-rt5650");
-
--- /dev/null
+++ b/sound/soc/mediatek/mt8173/Makefile
+# MTK Platform Support
+obj-$(CONFIG_SND_SOC_MT8173) += mt8173-afe-pcm.o
+# Machine support
+obj-$(CONFIG_SND_SOC_MT8173_MAX98090) += mt8173-max98090.o
+obj-$(CONFIG_SND_SOC_MT8173_RT5650) += mt8173-rt5650.o
+obj-$(CONFIG_SND_SOC_MT8173_RT5650_RT5514) += mt8173-rt5650-rt5514.o
+obj-$(CONFIG_SND_SOC_MT8173_RT5650_RT5676) += mt8173-rt5650-rt5676.o
--- /dev/null
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-common.h
+/*
+ * mt8173-afe-common.h -- Mediatek 8173 audio driver common definitions
+ *
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Koro Chen <koro.chen@mediatek.com>
+ * Sascha Hauer <s.hauer@pengutronix.de>
+ * Hidalgo Huang <hidalgo.huang@mediatek.com>
+ * Ir Lian <ir.lian@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT8173_AFE_COMMON_H_
+#define _MT8173_AFE_COMMON_H_
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
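+/*
+ * Memory interfaces come first; the I/O DAIs continue in the same ID
+ * space, starting at MT8173_AFE_MEMIF_NUM.
+ */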
+enum {
+ MT8173_AFE_MEMIF_DL1,
+ MT8173_AFE_MEMIF_DL2,
+ MT8173_AFE_MEMIF_VUL,
+ MT8173_AFE_MEMIF_DAI,
+ MT8173_AFE_MEMIF_AWB,
+ MT8173_AFE_MEMIF_MOD_DAI,
+ MT8173_AFE_MEMIF_HDMI,
+ MT8173_AFE_MEMIF_NUM,
+ MT8173_AFE_IO_MOD_PCM1 = MT8173_AFE_MEMIF_NUM,
+ MT8173_AFE_IO_MOD_PCM2,
+ MT8173_AFE_IO_PMIC,
+ MT8173_AFE_IO_I2S,
+ MT8173_AFE_IO_2ND_I2S,
+ MT8173_AFE_IO_HW_GAIN1,
+ MT8173_AFE_IO_HW_GAIN2,
+ MT8173_AFE_IO_MRG_O,
+ MT8173_AFE_IO_MRG_I,
+ MT8173_AFE_IO_DAIBT,
+ MT8173_AFE_IO_HDMI,
+};
+
+enum {
+ MT8173_AFE_IRQ_DL1,
+ MT8173_AFE_IRQ_DL2,
+ MT8173_AFE_IRQ_VUL,
+ MT8173_AFE_IRQ_DAI,
+ MT8173_AFE_IRQ_AWB,
+ MT8173_AFE_IRQ_MOD_DAI,
+ MT8173_AFE_IRQ_HDMI,
+ MT8173_AFE_IRQ_NUM,
+};
+
+enum {
+ MT8173_CLK_INFRASYS_AUD,
+ MT8173_CLK_TOP_PDN_AUD,
+ MT8173_CLK_TOP_PDN_AUD_BUS,
+ MT8173_CLK_I2S0_M,
+ MT8173_CLK_I2S1_M,
+ MT8173_CLK_I2S2_M,
+ MT8173_CLK_I2S3_M,
+ MT8173_CLK_I2S3_B,
+ MT8173_CLK_BCK0,
+ MT8173_CLK_BCK1,
+ MT8173_CLK_NUM
+};
+
+#endif
--- /dev/null
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+/*
+ * Mediatek 8173 ALSA SoC AFE platform driver
+ *
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Koro Chen <koro.chen@mediatek.com>
+ * Sascha Hauer <s.hauer@pengutronix.de>
+ * Hidalgo Huang <hidalgo.huang@mediatek.com>
+ * Ir Lian <ir.lian@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc.h>
+#include "mt8173-afe-common.h"
+#include "../common/mtk-base-afe.h"
+#include "../common/mtk-afe-platform-driver.h"
+#include "../common/mtk-afe-fe-dai.h"
+
+/*****************************************************************************
+ * R E G I S T E R D E F I N I T I O N
+ *****************************************************************************/
+#define AUDIO_TOP_CON0 0x0000
+#define AUDIO_TOP_CON1 0x0004
+#define AFE_DAC_CON0 0x0010
+#define AFE_DAC_CON1 0x0014
+#define AFE_I2S_CON1 0x0034
+#define AFE_I2S_CON2 0x0038
+#define AFE_CONN_24BIT 0x006c
+#define AFE_MEMIF_MSB 0x00cc
+
+#define AFE_CONN1 0x0024
+#define AFE_CONN2 0x0028
+#define AFE_CONN3 0x002c
+#define AFE_CONN7 0x0460
+#define AFE_CONN8 0x0464
+#define AFE_HDMI_CONN0 0x0390
+
+/* Memory interface */
+#define AFE_DL1_BASE 0x0040
+#define AFE_DL1_CUR 0x0044
+#define AFE_DL1_END 0x0048
+#define AFE_DL2_BASE 0x0050
+#define AFE_DL2_CUR 0x0054
+#define AFE_AWB_BASE 0x0070
+#define AFE_AWB_CUR 0x007c
+#define AFE_VUL_BASE 0x0080
+#define AFE_VUL_CUR 0x008c
+#define AFE_VUL_END 0x0088
+#define AFE_DAI_BASE 0x0090
+#define AFE_DAI_CUR 0x009c
+#define AFE_MOD_PCM_BASE 0x0330
+#define AFE_MOD_PCM_CUR 0x033c
+#define AFE_HDMI_OUT_BASE 0x0374
+#define AFE_HDMI_OUT_CUR 0x0378
+#define AFE_HDMI_OUT_END 0x037c
+
+#define AFE_ADDA_TOP_CON0 0x0120
+#define AFE_ADDA2_TOP_CON0 0x0600
+
+#define AFE_HDMI_OUT_CON0 0x0370
+
+#define AFE_IRQ_MCU_CON 0x03a0
+#define AFE_IRQ_STATUS 0x03a4
+#define AFE_IRQ_CLR 0x03a8
+#define AFE_IRQ_CNT1 0x03ac
+#define AFE_IRQ_CNT2 0x03b0
+#define AFE_IRQ_MCU_EN 0x03b4
+#define AFE_IRQ_CNT5 0x03bc
+#define AFE_IRQ_CNT7 0x03dc
+
+#define AFE_TDM_CON1 0x0548
+#define AFE_TDM_CON2 0x054c
+
+#define AFE_IRQ_STATUS_BITS 0xff
+
+/* AUDIO_TOP_CON0 (0x0000) */
+#define AUD_TCON0_PDN_SPDF (0x1 << 21)
+#define AUD_TCON0_PDN_HDMI (0x1 << 20)
+#define AUD_TCON0_PDN_24M (0x1 << 9)
+#define AUD_TCON0_PDN_22M (0x1 << 8)
+#define AUD_TCON0_PDN_AFE (0x1 << 2)
+
+/* AFE_I2S_CON1 (0x0034) */
+#define AFE_I2S_CON1_LOW_JITTER_CLK (0x1 << 12)
+#define AFE_I2S_CON1_RATE(x) (((x) & 0xf) << 8)
+#define AFE_I2S_CON1_FORMAT_I2S (0x1 << 3)
+#define AFE_I2S_CON1_EN (0x1 << 0)
+
+/* AFE_I2S_CON2 (0x0038) */
+#define AFE_I2S_CON2_LOW_JITTER_CLK (0x1 << 12)
+#define AFE_I2S_CON2_RATE(x) (((x) & 0xf) << 8)
+#define AFE_I2S_CON2_FORMAT_I2S (0x1 << 3)
+#define AFE_I2S_CON2_EN (0x1 << 0)
+
+/* AFE_CONN_24BIT (0x006c) */
+#define AFE_CONN_24BIT_O04 (0x1 << 4)
+#define AFE_CONN_24BIT_O03 (0x1 << 3)
+
+/* AFE_HDMI_CONN0 (0x0390) */
+#define AFE_HDMI_CONN0_O37_I37 (0x7 << 21)
+#define AFE_HDMI_CONN0_O36_I36 (0x6 << 18)
+#define AFE_HDMI_CONN0_O35_I33 (0x3 << 15)
+#define AFE_HDMI_CONN0_O34_I32 (0x2 << 12)
+#define AFE_HDMI_CONN0_O33_I35 (0x5 << 9)
+#define AFE_HDMI_CONN0_O32_I34 (0x4 << 6)
+#define AFE_HDMI_CONN0_O31_I31 (0x1 << 3)
+#define AFE_HDMI_CONN0_O30_I30 (0x0 << 0)
+
+/* AFE_TDM_CON1 (0x0548) */
+#define AFE_TDM_CON1_LRCK_WIDTH(x) (((x) - 1) << 24)
+#define AFE_TDM_CON1_32_BCK_CYCLES (0x2 << 12)
+#define AFE_TDM_CON1_WLEN_32BIT (0x2 << 8)
+#define AFE_TDM_CON1_MSB_ALIGNED (0x1 << 4)
+#define AFE_TDM_CON1_1_BCK_DELAY (0x1 << 3)
+#define AFE_TDM_CON1_LRCK_INV (0x1 << 2)
+#define AFE_TDM_CON1_BCK_INV (0x1 << 1)
+#define AFE_TDM_CON1_EN (0x1 << 0)
+
+enum afe_tdm_ch_start {
+ AFE_TDM_CH_START_O30_O31 = 0,
+ AFE_TDM_CH_START_O32_O33,
+ AFE_TDM_CH_START_O34_O35,
+ AFE_TDM_CH_START_O36_O37,
+ AFE_TDM_CH_ZERO,
+};
+
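+/* registers saved and restored by the common AFE code across runtime suspend */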
+static const unsigned int mt8173_afe_backup_list[] = {
+ AUDIO_TOP_CON0,
+ AFE_CONN1,
+ AFE_CONN2,
+ AFE_CONN7,
+ AFE_CONN8,
+ AFE_DAC_CON1,
+ AFE_DL1_BASE,
+ AFE_DL1_END,
+ AFE_VUL_BASE,
+ AFE_VUL_END,
+ AFE_HDMI_OUT_BASE,
+ AFE_HDMI_OUT_END,
+ AFE_HDMI_CONN0,
+ AFE_DAC_CON0,
+};
+
+struct mt8173_afe_private {
+ struct clk *clocks[MT8173_CLK_NUM];
+};
+
+static const struct snd_pcm_hardware mt8173_afe_hardware = {
+ .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP_VALID),
+ .buffer_bytes_max = 256 * 1024,
+ .period_bytes_min = 512,
+ .period_bytes_max = 128 * 1024,
+ .periods_min = 2,
+ .periods_max = 256,
+ .fifo_size = 0,
+};
+
+struct mt8173_afe_rate {
+ unsigned int rate;
+ unsigned int regvalue;
+};
+
+static const struct mt8173_afe_rate mt8173_afe_i2s_rates[] = {
+ { .rate = 8000, .regvalue = 0 },
+ { .rate = 11025, .regvalue = 1 },
+ { .rate = 12000, .regvalue = 2 },
+ { .rate = 16000, .regvalue = 4 },
+ { .rate = 22050, .regvalue = 5 },
+ { .rate = 24000, .regvalue = 6 },
+ { .rate = 32000, .regvalue = 8 },
+ { .rate = 44100, .regvalue = 9 },
+ { .rate = 48000, .regvalue = 10 },
+	{ .rate = 88200, .regvalue = 11 },
+ { .rate = 96000, .regvalue = 12 },
+	{ .rate = 176400, .regvalue = 13 },
+ { .rate = 192000, .regvalue = 14 },
+};
+
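+/* map a sample rate to the 4-bit fs value used in the AFE registers */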
+static int mt8173_afe_i2s_fs(unsigned int sample_rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt8173_afe_i2s_rates); i++)
+ if (mt8173_afe_i2s_rates[i].rate == sample_rate)
+ return mt8173_afe_i2s_rates[i].regvalue;
+
+ return -EINVAL;
+}
+
+static int mt8173_afe_set_i2s(struct mtk_base_afe *afe, unsigned int rate)
+{
+ unsigned int val;
+ int fs = mt8173_afe_i2s_fs(rate);
+
+ if (fs < 0)
+ return -EINVAL;
+
+ /* from external ADC */
+ regmap_update_bits(afe->regmap, AFE_ADDA_TOP_CON0, 0x1, 0x1);
+ regmap_update_bits(afe->regmap, AFE_ADDA2_TOP_CON0, 0x1, 0x1);
+
+ /* set input */
+ val = AFE_I2S_CON2_LOW_JITTER_CLK |
+ AFE_I2S_CON2_RATE(fs) |
+ AFE_I2S_CON2_FORMAT_I2S;
+
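+	/* update every field except the enable bit, which is toggled separately */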
+ regmap_update_bits(afe->regmap, AFE_I2S_CON2, ~AFE_I2S_CON2_EN, val);
+
+ /* set output */
+ val = AFE_I2S_CON1_LOW_JITTER_CLK |
+ AFE_I2S_CON1_RATE(fs) |
+ AFE_I2S_CON1_FORMAT_I2S;
+
+ regmap_update_bits(afe->regmap, AFE_I2S_CON1, ~AFE_I2S_CON1_EN, val);
+ return 0;
+}
+
+static void mt8173_afe_set_i2s_enable(struct mtk_base_afe *afe, bool enable)
+{
+ unsigned int val;
+
+ regmap_read(afe->regmap, AFE_I2S_CON2, &val);
+ if (!!(val & AFE_I2S_CON2_EN) == enable)
+ return;
+
+ /* input */
+ regmap_update_bits(afe->regmap, AFE_I2S_CON2, 0x1, enable);
+
+ /* output */
+ regmap_update_bits(afe->regmap, AFE_I2S_CON1, 0x1, enable);
+}
+
+static int mt8173_afe_dais_enable_clks(struct mtk_base_afe *afe,
+ struct clk *m_ck, struct clk *b_ck)
+{
+ int ret;
+
+ if (m_ck) {
+ ret = clk_prepare_enable(m_ck);
+ if (ret) {
+ dev_err(afe->dev, "Failed to enable m_ck\n");
+ return ret;
+ }
+ }
+
+ if (b_ck) {
+ ret = clk_prepare_enable(b_ck);
+ if (ret) {
+ dev_err(afe->dev, "Failed to enable b_ck\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int mt8173_afe_dais_set_clks(struct mtk_base_afe *afe,
+ struct clk *m_ck, unsigned int mck_rate,
+ struct clk *b_ck, unsigned int bck_rate)
+{
+ int ret;
+
+ if (m_ck) {
+ ret = clk_set_rate(m_ck, mck_rate);
+ if (ret) {
+ dev_err(afe->dev, "Failed to set m_ck rate\n");
+ return ret;
+ }
+ }
+
+ if (b_ck) {
+ ret = clk_set_rate(b_ck, bck_rate);
+ if (ret) {
+ dev_err(afe->dev, "Failed to set b_ck rate\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void mt8173_afe_dais_disable_clks(struct mtk_base_afe *afe,
+ struct clk *m_ck, struct clk *b_ck)
+{
+ if (m_ck)
+ clk_disable_unprepare(m_ck);
+ if (b_ck)
+ clk_disable_unprepare(b_ck);
+}
+
+static int mt8173_afe_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+
+ if (dai->active)
+ return 0;
+
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
+ AUD_TCON0_PDN_22M | AUD_TCON0_PDN_24M, 0);
+ return 0;
+}
+
+static void mt8173_afe_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+
+ if (dai->active)
+ return;
+
+ mt8173_afe_set_i2s_enable(afe, false);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
+ AUD_TCON0_PDN_22M | AUD_TCON0_PDN_24M,
+ AUD_TCON0_PDN_22M | AUD_TCON0_PDN_24M);
+}
+
+static int mt8173_afe_i2s_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime * const runtime = substream->runtime;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ mt8173_afe_dais_set_clks(afe, afe_priv->clocks[MT8173_CLK_I2S1_M],
+ runtime->rate * 256, NULL, 0);
+ mt8173_afe_dais_set_clks(afe, afe_priv->clocks[MT8173_CLK_I2S2_M],
+ runtime->rate * 256, NULL, 0);
+ /* config I2S */
+ ret = mt8173_afe_set_i2s(afe, substream->runtime->rate);
+ if (ret)
+ return ret;
+
+ mt8173_afe_set_i2s_enable(afe, true);
+
+ return 0;
+}
+
+static int mt8173_afe_hdmi_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
+
+ if (dai->active)
+ return 0;
+
+ mt8173_afe_dais_enable_clks(afe, afe_priv->clocks[MT8173_CLK_I2S3_M],
+ afe_priv->clocks[MT8173_CLK_I2S3_B]);
+ return 0;
+}
+
+static void mt8173_afe_hdmi_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
+
+ if (dai->active)
+ return;
+
+ mt8173_afe_dais_disable_clks(afe, afe_priv->clocks[MT8173_CLK_I2S3_M],
+ afe_priv->clocks[MT8173_CLK_I2S3_B]);
+}
+
+static int mt8173_afe_hdmi_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime * const runtime = substream->runtime;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
+
+ unsigned int val;
+
+ mt8173_afe_dais_set_clks(afe, afe_priv->clocks[MT8173_CLK_I2S3_M],
+ runtime->rate * 128,
+ afe_priv->clocks[MT8173_CLK_I2S3_B],
+ runtime->rate * runtime->channels * 32);
+
+ val = AFE_TDM_CON1_BCK_INV |
+ AFE_TDM_CON1_LRCK_INV |
+ AFE_TDM_CON1_1_BCK_DELAY |
+ AFE_TDM_CON1_MSB_ALIGNED | /* I2S mode */
+ AFE_TDM_CON1_WLEN_32BIT |
+ AFE_TDM_CON1_32_BCK_CYCLES |
+ AFE_TDM_CON1_LRCK_WIDTH(32);
+ regmap_update_bits(afe->regmap, AFE_TDM_CON1, ~AFE_TDM_CON1_EN, val);
+
+ /* set tdm2 config */
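+	/*
+	 * Each 4-bit field of AFE_TDM_CON2 selects the source pair for one
+	 * TDM slot; unused slots are fed from AFE_TDM_CH_ZERO.
+	 */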
+ switch (runtime->channels) {
+ case 1:
+ case 2:
+ val = AFE_TDM_CH_START_O30_O31;
+ val |= (AFE_TDM_CH_ZERO << 4);
+ val |= (AFE_TDM_CH_ZERO << 8);
+ val |= (AFE_TDM_CH_ZERO << 12);
+ break;
+ case 3:
+ case 4:
+ val = AFE_TDM_CH_START_O30_O31;
+ val |= (AFE_TDM_CH_START_O32_O33 << 4);
+ val |= (AFE_TDM_CH_ZERO << 8);
+ val |= (AFE_TDM_CH_ZERO << 12);
+ break;
+ case 5:
+ case 6:
+ val = AFE_TDM_CH_START_O30_O31;
+ val |= (AFE_TDM_CH_START_O32_O33 << 4);
+ val |= (AFE_TDM_CH_START_O34_O35 << 8);
+ val |= (AFE_TDM_CH_ZERO << 12);
+ break;
+ case 7:
+ case 8:
+ val = AFE_TDM_CH_START_O30_O31;
+ val |= (AFE_TDM_CH_START_O32_O33 << 4);
+ val |= (AFE_TDM_CH_START_O34_O35 << 8);
+ val |= (AFE_TDM_CH_START_O36_O37 << 12);
+ break;
+ default:
+ val = 0;
+ }
+ regmap_update_bits(afe->regmap, AFE_TDM_CON2, 0x0000ffff, val);
+
+ regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0,
+ 0x000000f0, runtime->channels << 4);
+ return 0;
+}
+
+static int mt8173_afe_hdmi_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+
+ dev_info(afe->dev, "%s cmd=%d %s\n", __func__, cmd, dai->name);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
+ AUD_TCON0_PDN_HDMI | AUD_TCON0_PDN_SPDF, 0);
+
+ /* set connections: O30~O37: L/R/LS/RS/C/LFE/CH7/CH8 */
+ regmap_write(afe->regmap, AFE_HDMI_CONN0,
+ AFE_HDMI_CONN0_O30_I30 |
+ AFE_HDMI_CONN0_O31_I31 |
+ AFE_HDMI_CONN0_O32_I34 |
+ AFE_HDMI_CONN0_O33_I35 |
+ AFE_HDMI_CONN0_O34_I32 |
+ AFE_HDMI_CONN0_O35_I33 |
+ AFE_HDMI_CONN0_O36_I36 |
+ AFE_HDMI_CONN0_O37_I37);
+
+ /* enable Out control */
+ regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, 0x1, 0x1);
+
+ /* enable tdm */
+ regmap_update_bits(afe->regmap, AFE_TDM_CON1, 0x1, 0x1);
+
+ return 0;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ /* disable tdm */
+ regmap_update_bits(afe->regmap, AFE_TDM_CON1, 0x1, 0);
+
+ /* disable Out control */
+ regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, 0x1, 0);
+
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
+ AUD_TCON0_PDN_HDMI | AUD_TCON0_PDN_SPDF,
+ AUD_TCON0_PDN_HDMI | AUD_TCON0_PDN_SPDF);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mt8173_memif_fs(struct snd_pcm_substream *substream,
+ unsigned int rate)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ int fs;
+
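+	/* DAI and MOD_DAI use a 2-bit fs field that only encodes 8/16/32 kHz */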
+ if (memif->data->id == MT8173_AFE_MEMIF_DAI ||
+ memif->data->id == MT8173_AFE_MEMIF_MOD_DAI) {
+ switch (rate) {
+ case 8000:
+ fs = 0;
+ break;
+ case 16000:
+ fs = 1;
+ break;
+ case 32000:
+ fs = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ fs = mt8173_afe_i2s_fs(rate);
+ }
+ return fs;
+}
+
+static int mt8173_irq_fs(struct snd_pcm_substream *substream, unsigned int rate)
+{
+ return mt8173_afe_i2s_fs(rate);
+}
+
+/* BE DAIs */
+static const struct snd_soc_dai_ops mt8173_afe_i2s_ops = {
+ .startup = mt8173_afe_i2s_startup,
+ .shutdown = mt8173_afe_i2s_shutdown,
+ .prepare = mt8173_afe_i2s_prepare,
+};
+
+static const struct snd_soc_dai_ops mt8173_afe_hdmi_ops = {
+ .startup = mt8173_afe_hdmi_startup,
+ .shutdown = mt8173_afe_hdmi_shutdown,
+ .prepare = mt8173_afe_hdmi_prepare,
+ .trigger = mt8173_afe_hdmi_trigger,
+};
+
+static struct snd_soc_dai_driver mt8173_afe_pcm_dais[] = {
+	/* FE DAIs: memory interfaces to CPU */
+ {
+ .name = "DL1", /* downlink 1 */
+ .id = MT8173_AFE_MEMIF_DL1,
+ .suspend = mtk_afe_dai_suspend,
+ .resume = mtk_afe_dai_resume,
+ .playback = {
+ .stream_name = "DL1",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &mtk_afe_fe_ops,
+ }, {
+ .name = "VUL", /* voice uplink */
+ .id = MT8173_AFE_MEMIF_VUL,
+ .suspend = mtk_afe_dai_suspend,
+ .resume = mtk_afe_dai_resume,
+ .capture = {
+ .stream_name = "VUL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &mtk_afe_fe_ops,
+ }, {
+ /* BE DAIs */
+ .name = "I2S",
+ .id = MT8173_AFE_IO_I2S,
+ .playback = {
+ .stream_name = "I2S Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "I2S Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &mt8173_afe_i2s_ops,
+ .symmetric_rates = 1,
+ },
+};
+
+static struct snd_soc_dai_driver mt8173_afe_hdmi_dais[] = {
+ /* FE DAIs */
+ {
+ .name = "HDMI",
+ .id = MT8173_AFE_MEMIF_HDMI,
+ .suspend = mtk_afe_dai_suspend,
+ .resume = mtk_afe_dai_resume,
+ .playback = {
+ .stream_name = "HDMI",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &mtk_afe_fe_ops,
+ }, {
+ /* BE DAIs */
+ .name = "HDMIO",
+ .id = MT8173_AFE_IO_HDMI,
+ .playback = {
+ .stream_name = "HDMIO Playback",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &mt8173_afe_hdmi_ops,
+ },
+};
+
+static const struct snd_kcontrol_new mt8173_afe_o03_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I05 Switch", AFE_CONN1, 21, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8173_afe_o04_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I06 Switch", AFE_CONN2, 6, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8173_afe_o09_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I03 Switch", AFE_CONN3, 0, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I17 Switch", AFE_CONN7, 30, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt8173_afe_o10_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I04 Switch", AFE_CONN3, 3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I18 Switch", AFE_CONN8, 0, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget mt8173_afe_pcm_widgets[] = {
+ /* inter-connections */
+ SND_SOC_DAPM_MIXER("I03", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I04", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I05", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I06", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I17", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I18", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("O03", SND_SOC_NOPM, 0, 0,
+ mt8173_afe_o03_mix, ARRAY_SIZE(mt8173_afe_o03_mix)),
+ SND_SOC_DAPM_MIXER("O04", SND_SOC_NOPM, 0, 0,
+ mt8173_afe_o04_mix, ARRAY_SIZE(mt8173_afe_o04_mix)),
+ SND_SOC_DAPM_MIXER("O09", SND_SOC_NOPM, 0, 0,
+ mt8173_afe_o09_mix, ARRAY_SIZE(mt8173_afe_o09_mix)),
+ SND_SOC_DAPM_MIXER("O10", SND_SOC_NOPM, 0, 0,
+ mt8173_afe_o10_mix, ARRAY_SIZE(mt8173_afe_o10_mix)),
+};
+
+static const struct snd_soc_dapm_route mt8173_afe_pcm_routes[] = {
+ {"I05", NULL, "DL1"},
+ {"I06", NULL, "DL1"},
+ {"I2S Playback", NULL, "O03"},
+ {"I2S Playback", NULL, "O04"},
+ {"VUL", NULL, "O09"},
+ {"VUL", NULL, "O10"},
+ {"I03", NULL, "I2S Capture"},
+ {"I04", NULL, "I2S Capture"},
+ {"I17", NULL, "I2S Capture"},
+ {"I18", NULL, "I2S Capture"},
+ { "O03", "I05 Switch", "I05" },
+ { "O04", "I06 Switch", "I06" },
+ { "O09", "I17 Switch", "I17" },
+ { "O09", "I03 Switch", "I03" },
+ { "O10", "I18 Switch", "I18" },
+ { "O10", "I04 Switch", "I04" },
+};
+
+static const struct snd_soc_dapm_route mt8173_afe_hdmi_routes[] = {
+ {"HDMIO Playback", NULL, "HDMI"},
+};
+
+static const struct snd_soc_component_driver mt8173_afe_pcm_dai_component = {
+ .name = "mt8173-afe-pcm-dai",
+ .dapm_widgets = mt8173_afe_pcm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt8173_afe_pcm_widgets),
+ .dapm_routes = mt8173_afe_pcm_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt8173_afe_pcm_routes),
+};
+
+static const struct snd_soc_component_driver mt8173_afe_hdmi_dai_component = {
+ .name = "mt8173-afe-hdmi-dai",
+ .dapm_routes = mt8173_afe_hdmi_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt8173_afe_hdmi_routes),
+};
+
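+/* clock-consumer names looked up in DT, indexed by the MT8173_CLK_* enum */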
+static const char *aud_clks[MT8173_CLK_NUM] = {
+ [MT8173_CLK_INFRASYS_AUD] = "infra_sys_audio_clk",
+ [MT8173_CLK_TOP_PDN_AUD] = "top_pdn_audio",
+ [MT8173_CLK_TOP_PDN_AUD_BUS] = "top_pdn_aud_intbus",
+ [MT8173_CLK_I2S0_M] = "i2s0_m",
+ [MT8173_CLK_I2S1_M] = "i2s1_m",
+ [MT8173_CLK_I2S2_M] = "i2s2_m",
+ [MT8173_CLK_I2S3_M] = "i2s3_m",
+ [MT8173_CLK_I2S3_B] = "i2s3_b",
+ [MT8173_CLK_BCK0] = "bck0",
+ [MT8173_CLK_BCK1] = "bck1",
+};
+
+static const struct mtk_base_memif_data memif_data[MT8173_AFE_MEMIF_NUM] = {
+ {
+ .name = "DL1",
+ .id = MT8173_AFE_MEMIF_DL1,
+ .reg_ofs_base = AFE_DL1_BASE,
+ .reg_ofs_cur = AFE_DL1_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 0,
+ .fs_maskbit = 0xf,
+ .mono_reg = AFE_DAC_CON1,
+ .mono_shift = 21,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 1,
+ .msb_reg = AFE_MEMIF_MSB,
+ .msb_shift = 0,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ }, {
+ .name = "DL2",
+ .id = MT8173_AFE_MEMIF_DL2,
+ .reg_ofs_base = AFE_DL2_BASE,
+ .reg_ofs_cur = AFE_DL2_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 4,
+ .fs_maskbit = 0xf,
+ .mono_reg = AFE_DAC_CON1,
+ .mono_shift = 22,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 2,
+ .msb_reg = AFE_MEMIF_MSB,
+ .msb_shift = 1,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ }, {
+ .name = "VUL",
+ .id = MT8173_AFE_MEMIF_VUL,
+ .reg_ofs_base = AFE_VUL_BASE,
+ .reg_ofs_cur = AFE_VUL_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 16,
+ .fs_maskbit = 0xf,
+ .mono_reg = AFE_DAC_CON1,
+ .mono_shift = 27,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 3,
+ .msb_reg = AFE_MEMIF_MSB,
+ .msb_shift = 6,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ }, {
+ .name = "DAI",
+ .id = MT8173_AFE_MEMIF_DAI,
+ .reg_ofs_base = AFE_DAI_BASE,
+ .reg_ofs_cur = AFE_DAI_CUR,
+ .fs_reg = AFE_DAC_CON0,
+ .fs_shift = 24,
+ .fs_maskbit = 0x3,
+ .mono_reg = -1,
+ .mono_shift = -1,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 4,
+ .msb_reg = AFE_MEMIF_MSB,
+ .msb_shift = 5,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ }, {
+ .name = "AWB",
+ .id = MT8173_AFE_MEMIF_AWB,
+ .reg_ofs_base = AFE_AWB_BASE,
+ .reg_ofs_cur = AFE_AWB_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 12,
+ .fs_maskbit = 0xf,
+ .mono_reg = AFE_DAC_CON1,
+ .mono_shift = 24,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 6,
+ .msb_reg = AFE_MEMIF_MSB,
+ .msb_shift = 3,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ }, {
+ .name = "MOD_DAI",
+ .id = MT8173_AFE_MEMIF_MOD_DAI,
+ .reg_ofs_base = AFE_MOD_PCM_BASE,
+ .reg_ofs_cur = AFE_MOD_PCM_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 30,
+ .fs_maskbit = 0x3,
+ .mono_reg = AFE_DAC_CON1,
+ .mono_shift = 30,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 7,
+ .msb_reg = AFE_MEMIF_MSB,
+ .msb_shift = 4,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ }, {
+ .name = "HDMI",
+ .id = MT8173_AFE_MEMIF_HDMI,
+ .reg_ofs_base = AFE_HDMI_OUT_BASE,
+ .reg_ofs_cur = AFE_HDMI_OUT_CUR,
+ .fs_reg = -1,
+ .fs_shift = -1,
+ .fs_maskbit = -1,
+ .mono_reg = -1,
+ .mono_shift = -1,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = -1,
+ .enable_shift = -1,
+ .msb_reg = AFE_MEMIF_MSB,
+ .msb_shift = 8,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ },
+};
+
+static const struct mtk_base_irq_data irq_data[MT8173_AFE_IRQ_NUM] = {
+ {
+ .id = MT8173_AFE_IRQ_DL1,
+ .irq_cnt_reg = AFE_IRQ_CNT1,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
+ .irq_en_shift = 0,
+ .irq_fs_reg = AFE_IRQ_MCU_CON,
+ .irq_fs_shift = 4,
+ .irq_fs_maskbit = 0xf,
+ .irq_clr_reg = AFE_IRQ_CLR,
+ .irq_clr_shift = 0,
+ }, {
+ .id = MT8173_AFE_IRQ_DL2,
+ .irq_cnt_reg = AFE_IRQ_CNT1,
+ .irq_cnt_shift = 20,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
+ .irq_en_shift = 2,
+ .irq_fs_reg = AFE_IRQ_MCU_CON,
+ .irq_fs_shift = 16,
+ .irq_fs_maskbit = 0xf,
+ .irq_clr_reg = AFE_IRQ_CLR,
+ .irq_clr_shift = 2,
+
+ }, {
+ .id = MT8173_AFE_IRQ_VUL,
+ .irq_cnt_reg = AFE_IRQ_CNT2,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
+ .irq_en_shift = 1,
+ .irq_fs_reg = AFE_IRQ_MCU_CON,
+ .irq_fs_shift = 8,
+ .irq_fs_maskbit = 0xf,
+ .irq_clr_reg = AFE_IRQ_CLR,
+ .irq_clr_shift = 1,
+ }, {
+ .id = MT8173_AFE_IRQ_DAI,
+ .irq_cnt_reg = AFE_IRQ_CNT2,
+ .irq_cnt_shift = 20,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
+ .irq_en_shift = 3,
+ .irq_fs_reg = AFE_IRQ_MCU_CON,
+ .irq_fs_shift = 20,
+ .irq_fs_maskbit = 0xf,
+ .irq_clr_reg = AFE_IRQ_CLR,
+ .irq_clr_shift = 3,
+ }, {
+ .id = MT8173_AFE_IRQ_AWB,
+ .irq_cnt_reg = AFE_IRQ_CNT7,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
+ .irq_en_shift = 14,
+ .irq_fs_reg = AFE_IRQ_MCU_CON,
+ .irq_fs_shift = 24,
+ .irq_fs_maskbit = 0xf,
+ .irq_clr_reg = AFE_IRQ_CLR,
+ .irq_clr_shift = 6,
+ }, {
+		.id = MT8173_AFE_IRQ_MOD_DAI,
+ .irq_cnt_reg = AFE_IRQ_CNT2,
+ .irq_cnt_shift = 20,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
+ .irq_en_shift = 3,
+ .irq_fs_reg = AFE_IRQ_MCU_CON,
+ .irq_fs_shift = 20,
+ .irq_fs_maskbit = 0xf,
+ .irq_clr_reg = AFE_IRQ_CLR,
+ .irq_clr_shift = 3,
+ }, {
+ .id = MT8173_AFE_IRQ_HDMI,
+ .irq_cnt_reg = AFE_IRQ_CNT5,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
+ .irq_en_shift = 12,
+ .irq_fs_reg = -1,
+ .irq_fs_shift = -1,
+ .irq_fs_maskbit = -1,
+ .irq_clr_reg = AFE_IRQ_CLR,
+ .irq_clr_shift = 4,
+ },
+};
+
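+/*
+ * No register cache is used; the registers listed in mt8173_afe_backup_list
+ * are saved and restored explicitly across suspend instead.
+ */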
+static const struct regmap_config mt8173_afe_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = AFE_ADDA2_TOP_CON0,
+ .cache_type = REGCACHE_NONE,
+};
+
+static irqreturn_t mt8173_afe_irq_handler(int irq, void *dev_id)
+{
+ struct mtk_base_afe *afe = dev_id;
+ unsigned int reg_value;
+ int i, ret;
+
+	ret = regmap_read(afe->regmap, AFE_IRQ_STATUS, &reg_value);
+ if (ret) {
+ dev_err(afe->dev, "%s irq status err\n", __func__);
+ reg_value = AFE_IRQ_STATUS_BITS;
+ goto err_irq;
+ }
+
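+	/* report an elapsed period for every memif whose status bit is set */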
+ for (i = 0; i < MT8173_AFE_MEMIF_NUM; i++) {
+ struct mtk_base_afe_memif *memif = &afe->memif[i];
+ struct mtk_base_afe_irq *irq;
+
+ if (memif->irq_usage < 0)
+ continue;
+
+ irq = &afe->irqs[memif->irq_usage];
+
+ if (!(reg_value & (1 << irq->irq_data->irq_clr_shift)))
+ continue;
+
+ snd_pcm_period_elapsed(memif->substream);
+ }
+
+err_irq:
+ /* clear irq */
+ regmap_write(afe->regmap, AFE_IRQ_CLR,
+ reg_value & AFE_IRQ_STATUS_BITS);
+
+ return IRQ_HANDLED;
+}
+
+static int mt8173_afe_runtime_suspend(struct device *dev)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dev);
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
+
+ /* disable AFE */
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0);
+
+ /* disable AFE clk */
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
+ AUD_TCON0_PDN_AFE, AUD_TCON0_PDN_AFE);
+
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_I2S1_M]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_I2S2_M]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_BCK0]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_BCK1]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_TOP_PDN_AUD]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_TOP_PDN_AUD_BUS]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_INFRASYS_AUD]);
+ return 0;
+}
+
+static int mt8173_afe_runtime_resume(struct device *dev)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dev);
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_INFRASYS_AUD]);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_TOP_PDN_AUD_BUS]);
+ if (ret)
+ goto err_infra;
+
+ ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_TOP_PDN_AUD]);
+ if (ret)
+ goto err_top_aud_bus;
+
+ ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_BCK0]);
+ if (ret)
+ goto err_top_aud;
+
+ ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_BCK1]);
+ if (ret)
+ goto err_bck0;
+	ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_I2S1_M]);
+	if (ret)
+		goto err_bck1;
+	ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_I2S2_M]);
+	if (ret)
+		goto err_i2s1_m;
+
+ /* enable AFE clk */
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, AUD_TCON0_PDN_AFE, 0);
+
+ /* set O3/O4 16bits */
+ regmap_update_bits(afe->regmap, AFE_CONN_24BIT,
+ AFE_CONN_24BIT_O03 | AFE_CONN_24BIT_O04, 0);
+
+ /* unmask all IRQs */
+ regmap_update_bits(afe->regmap, AFE_IRQ_MCU_EN, 0xff, 0xff);
+
+ /* enable AFE */
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0x1);
+ return 0;
+
+err_i2s1_m:
+	clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_I2S1_M]);
+err_bck1:
+	clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_BCK1]);
+err_bck0:
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_BCK0]);
+err_top_aud:
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_TOP_PDN_AUD]);
+err_top_aud_bus:
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_TOP_PDN_AUD_BUS]);
+err_infra:
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_INFRASYS_AUD]);
+ return ret;
+}
+
+static int mt8173_afe_init_audio_clk(struct mtk_base_afe *afe)
+{
+ size_t i;
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
+
+ for (i = 0; i < ARRAY_SIZE(aud_clks); i++) {
+ afe_priv->clocks[i] = devm_clk_get(afe->dev, aud_clks[i]);
+ if (IS_ERR(afe_priv->clocks[i])) {
+ dev_err(afe->dev, "%s devm_clk_get %s fail\n",
+ __func__, aud_clks[i]);
+ return PTR_ERR(afe_priv->clocks[i]);
+ }
+ }
+ clk_set_rate(afe_priv->clocks[MT8173_CLK_BCK0], 22579200); /* 22M */
+ clk_set_rate(afe_priv->clocks[MT8173_CLK_BCK1], 24576000); /* 24M */
+ return 0;
+}
+
+static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
+{
+ int ret, i;
+	int irq_id;
+ struct mtk_base_afe *afe;
+ struct mt8173_afe_private *afe_priv;
+ struct resource *res;
+
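+	/*
+	 * 33-bit DMA mask: bit 32 of each memif buffer address is carried
+	 * in AFE_MEMIF_MSB (see the msb_reg fields above).
+	 */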
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33));
+ if (ret)
+ return ret;
+
+ afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
+ if (!afe)
+ return -ENOMEM;
+
+ afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv),
+ GFP_KERNEL);
+ afe_priv = afe->platform_priv;
+ if (!afe_priv)
+ return -ENOMEM;
+
+ afe->dev = &pdev->dev;
+
+ irq_id = platform_get_irq(pdev, 0);
+	if (irq_id <= 0) {
+ dev_err(afe->dev, "np %s no irq\n", afe->dev->of_node->name);
+ return -ENXIO;
+ }
+ ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
+ 0, "Afe_ISR_Handle", (void *)afe);
+ if (ret) {
+ dev_err(afe->dev, "could not request_irq\n");
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ afe->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(afe->base_addr))
+ return PTR_ERR(afe->base_addr);
+
+ afe->regmap = devm_regmap_init_mmio(&pdev->dev, afe->base_addr,
+ &mt8173_afe_regmap_config);
+ if (IS_ERR(afe->regmap))
+ return PTR_ERR(afe->regmap);
+
+ /* initial audio related clock */
+ ret = mt8173_afe_init_audio_clk(afe);
+ if (ret) {
+ dev_err(afe->dev, "mt8173_afe_init_audio_clk fail\n");
+ return ret;
+ }
+
+	/* memif & irq initialization */
+ afe->memif_size = MT8173_AFE_MEMIF_NUM;
+ afe->memif = devm_kcalloc(afe->dev, afe->memif_size,
+ sizeof(*afe->memif), GFP_KERNEL);
+ if (!afe->memif)
+ return -ENOMEM;
+
+ afe->irqs_size = MT8173_AFE_IRQ_NUM;
+ afe->irqs = devm_kcalloc(afe->dev, afe->irqs_size,
+ sizeof(*afe->irqs), GFP_KERNEL);
+ if (!afe->irqs)
+ return -ENOMEM;
+
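+	/* each memif is statically paired with the IRQ of the same index */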
+ for (i = 0; i < afe->irqs_size; i++) {
+ afe->memif[i].data = &memif_data[i];
+ afe->irqs[i].irq_data = &irq_data[i];
+ afe->irqs[i].irq_occupyed = true;
+ afe->memif[i].irq_usage = i;
+ afe->memif[i].const_irq = 1;
+ }
+
+ afe->mtk_afe_hardware = &mt8173_afe_hardware;
+ afe->memif_fs = mt8173_memif_fs;
+ afe->irq_fs = mt8173_irq_fs;
+
+ platform_set_drvdata(pdev, afe);
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = mt8173_afe_runtime_resume(&pdev->dev);
+ if (ret)
+ goto err_pm_disable;
+ }
+
+ afe->reg_back_up_list = mt8173_afe_backup_list;
+ afe->reg_back_up_list_num = ARRAY_SIZE(mt8173_afe_backup_list);
+ afe->runtime_resume = mt8173_afe_runtime_resume;
+ afe->runtime_suspend = mt8173_afe_runtime_suspend;
+
+ ret = snd_soc_register_platform(&pdev->dev, &mtk_afe_pcm_platform);
+ if (ret)
+ goto err_pm_disable;
+
+ ret = snd_soc_register_component(&pdev->dev,
+ &mt8173_afe_pcm_dai_component,
+ mt8173_afe_pcm_dais,
+ ARRAY_SIZE(mt8173_afe_pcm_dais));
+ if (ret)
+ goto err_platform;
+
+ ret = snd_soc_register_component(&pdev->dev,
+ &mt8173_afe_hdmi_dai_component,
+ mt8173_afe_hdmi_dais,
+ ARRAY_SIZE(mt8173_afe_hdmi_dais));
+ if (ret)
+ goto err_comp;
+
+ dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n");
+ return 0;
+
+err_comp:
+ snd_soc_unregister_component(&pdev->dev);
+err_platform:
+ snd_soc_unregister_platform(&pdev->dev);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int mt8173_afe_pcm_dev_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ mt8173_afe_runtime_suspend(&pdev->dev);
+ snd_soc_unregister_component(&pdev->dev);
+ snd_soc_unregister_platform(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id mt8173_afe_pcm_dt_match[] = {
+ { .compatible = "mediatek,mt8173-afe-pcm", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mt8173_afe_pcm_dt_match);
+
+static const struct dev_pm_ops mt8173_afe_pm_ops = {
+ SET_RUNTIME_PM_OPS(mt8173_afe_runtime_suspend,
+ mt8173_afe_runtime_resume, NULL)
+};
+
+static struct platform_driver mt8173_afe_pcm_driver = {
+ .driver = {
+ .name = "mt8173-afe-pcm",
+ .of_match_table = mt8173_afe_pcm_dt_match,
+ .pm = &mt8173_afe_pm_ops,
+ },
+ .probe = mt8173_afe_pcm_dev_probe,
+ .remove = mt8173_afe_pcm_dev_remove,
+};
+
+module_platform_driver(mt8173_afe_pcm_driver);
+
+MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver");
+MODULE_AUTHOR("Koro Chen <koro.chen@mediatek.com>");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+++ b/sound/soc/mediatek/mt8173/mt8173-max98090.c
+/*
+ * mt8173-max98090.c -- MT8173 MAX98090 ALSA SoC machine driver
+ *
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Koro Chen <koro.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <linux/gpio.h>
+#include "../../codecs/max98090.h"
+
+static struct snd_soc_jack mt8173_max98090_jack;
+
+static struct snd_soc_jack_pin mt8173_max98090_jack_pins[] = {
+ {
+ .pin = "Headphone",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
+static const struct snd_soc_dapm_widget mt8173_max98090_widgets[] = {
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_MIC("Int Mic", NULL),
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route mt8173_max98090_routes[] = {
+ {"Speaker", NULL, "SPKL"},
+ {"Speaker", NULL, "SPKR"},
+ {"DMICL", NULL, "Int Mic"},
+ {"Headphone", NULL, "HPL"},
+ {"Headphone", NULL, "HPR"},
+ {"Headset Mic", NULL, "MICBIAS"},
+ {"IN34", NULL, "Headset Mic"},
+};
+
+static const struct snd_kcontrol_new mt8173_max98090_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+};
+
+static int mt8173_max98090_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
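+	/* the codec is clocked externally at 256 * fs */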
+ return snd_soc_dai_set_sysclk(codec_dai, 0, params_rate(params) * 256,
+ SND_SOC_CLOCK_IN);
+}
+
+static struct snd_soc_ops mt8173_max98090_ops = {
+ .hw_params = mt8173_max98090_hw_params,
+};
+
+static int mt8173_max98090_init(struct snd_soc_pcm_runtime *runtime)
+{
+ int ret;
+ struct snd_soc_card *card = runtime->card;
+ struct snd_soc_codec *codec = runtime->codec;
+
+ /* enable jack detection */
+ ret = snd_soc_card_jack_new(card, "Headphone", SND_JACK_HEADPHONE,
+ &mt8173_max98090_jack, NULL, 0);
+ if (ret) {
+ dev_err(card->dev, "Can't snd_soc_jack_new %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_jack_add_pins(&mt8173_max98090_jack,
+ ARRAY_SIZE(mt8173_max98090_jack_pins),
+ mt8173_max98090_jack_pins);
+ if (ret) {
+ dev_err(card->dev, "Can't snd_soc_jack_add_pins %d\n", ret);
+ return ret;
+ }
+
+ return max98090_mic_detect(codec, &mt8173_max98090_jack);
+}
+
+/* Digital audio interface glue - connects codec <---> CPU */
+static struct snd_soc_dai_link mt8173_max98090_dais[] = {
+ /* Front End DAI links */
+ {
+ .name = "MAX98090 Playback",
+ .stream_name = "MAX98090 Playback",
+ .cpu_dai_name = "DL1",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ },
+ {
+ .name = "MAX98090 Capture",
+ .stream_name = "MAX98090 Capture",
+ .cpu_dai_name = "VUL",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ },
+ /* Back End DAI links */
+ {
+ .name = "Codec",
+ .cpu_dai_name = "I2S",
+ .no_pcm = 1,
+ .codec_dai_name = "HiFi",
+ .init = mt8173_max98090_init,
+ .ops = &mt8173_max98090_ops,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+};
+
+static struct snd_soc_card mt8173_max98090_card = {
+ .name = "mt8173-max98090",
+ .owner = THIS_MODULE,
+ .dai_link = mt8173_max98090_dais,
+ .num_links = ARRAY_SIZE(mt8173_max98090_dais),
+ .controls = mt8173_max98090_controls,
+ .num_controls = ARRAY_SIZE(mt8173_max98090_controls),
+ .dapm_widgets = mt8173_max98090_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt8173_max98090_widgets),
+ .dapm_routes = mt8173_max98090_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt8173_max98090_routes),
+};
+
+static int mt8173_max98090_dev_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &mt8173_max98090_card;
+ struct device_node *codec_node, *platform_node;
+ int ret, i;
+
+ platform_node = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,platform", 0);
+ if (!platform_node) {
+ dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < card->num_links; i++) {
+ if (mt8173_max98090_dais[i].platform_name)
+ continue;
+ mt8173_max98090_dais[i].platform_of_node = platform_node;
+ }
+
+ codec_node = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,audio-codec", 0);
+ if (!codec_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < card->num_links; i++) {
+ if (mt8173_max98090_dais[i].codec_name)
+ continue;
+ mt8173_max98090_dais[i].codec_of_node = codec_node;
+ }
+ card->dev = &pdev->dev;
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret)
+ dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static const struct of_device_id mt8173_max98090_dt_match[] = {
+ { .compatible = "mediatek,mt8173-max98090", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mt8173_max98090_dt_match);
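+
+/*
+ * Hypothetical example of a matching device tree node (the node name and
+ * phandle labels are placeholders; the probe above only parses the two
+ * "mediatek," properties):
+ *
+ *	sound {
+ *		compatible = "mediatek,mt8173-max98090";
+ *		mediatek,platform = <&afe>;
+ *		mediatek,audio-codec = <&max98090>;
+ *	};
+ */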
+
+static struct platform_driver mt8173_max98090_driver = {
+ .driver = {
+ .name = "mt8173-max98090",
+ .of_match_table = mt8173_max98090_dt_match,
+#ifdef CONFIG_PM
+ .pm = &snd_soc_pm_ops,
+#endif
+ },
+ .probe = mt8173_max98090_dev_probe,
+};
+
+module_platform_driver(mt8173_max98090_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("MT8173 MAX98090 ALSA SoC machine driver");
+MODULE_AUTHOR("Koro Chen <koro.chen@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mt8173-max98090");
+
--- /dev/null
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+/*
+ * mt8173-rt5650-rt5514.c -- MT8173 machine driver with RT5650/5514 codecs
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Koro Chen <koro.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include "../../codecs/rt5645.h"
+
+#define MCLK_FOR_CODECS 12288000
+
+static const struct snd_soc_dapm_widget mt8173_rt5650_rt5514_widgets[] = {
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_MIC("Int Mic", NULL),
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route mt8173_rt5650_rt5514_routes[] = {
+ {"Speaker", NULL, "SPOL"},
+ {"Speaker", NULL, "SPOR"},
+ {"Sub DMIC1L", NULL, "Int Mic"},
+ {"Sub DMIC1R", NULL, "Int Mic"},
+ {"Headphone", NULL, "HPOL"},
+ {"Headphone", NULL, "HPOR"},
+ {"Headset Mic", NULL, "micbias1"},
+ {"Headset Mic", NULL, "micbias2"},
+ {"IN1P", NULL, "Headset Mic"},
+ {"IN1N", NULL, "Headset Mic"},
+};
+
+static const struct snd_kcontrol_new mt8173_rt5650_rt5514_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+};
+
+static int mt8173_rt5650_rt5514_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int i, ret;
+
+ for (i = 0; i < rtd->num_codecs; i++) {
+ struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
+
+ /* pll from mclk 12.288M */
+ ret = snd_soc_dai_set_pll(codec_dai, 0, 0, MCLK_FOR_CODECS,
+ params_rate(params) * 512);
+ if (ret)
+ return ret;
+
+ /* sysclk from pll */
+ ret = snd_soc_dai_set_sysclk(codec_dai, 1,
+ params_rate(params) * 512,
+ SND_SOC_CLOCK_IN);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static struct snd_soc_ops mt8173_rt5650_rt5514_ops = {
+ .hw_params = mt8173_rt5650_rt5514_hw_params,
+};
+
+static struct snd_soc_jack mt8173_rt5650_rt5514_jack;
+
+static int mt8173_rt5650_rt5514_init(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_soc_card *card = runtime->card;
+ struct snd_soc_codec *codec = runtime->codec_dais[0]->codec;
+ int ret;
+
+ rt5645_sel_asrc_clk_src(codec,
+ RT5645_DA_STEREO_FILTER |
+ RT5645_AD_STEREO_FILTER,
+ RT5645_CLK_SEL_I2S1_ASRC);
+
+ /* enable jack detection */
+ ret = snd_soc_card_jack_new(card, "Headset Jack",
+ SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ &mt8173_rt5650_rt5514_jack, NULL, 0);
+ if (ret) {
+		dev_err(card->dev, "Can't create Headset Jack %d\n", ret);
+ return ret;
+ }
+
+ return rt5645_set_jack_detect(codec,
+ &mt8173_rt5650_rt5514_jack,
+ &mt8173_rt5650_rt5514_jack,
+ &mt8173_rt5650_rt5514_jack);
+}
+
+static struct snd_soc_dai_link_component mt8173_rt5650_rt5514_codecs[] = {
+ {
+ .dai_name = "rt5645-aif1",
+ },
+ {
+ .dai_name = "rt5514-aif1",
+ },
+};
+
+enum {
+ DAI_LINK_PLAYBACK,
+ DAI_LINK_CAPTURE,
+ DAI_LINK_CODEC_I2S,
+};
+
+/* Digital audio interface glue - connects codec <---> CPU */
+static struct snd_soc_dai_link mt8173_rt5650_rt5514_dais[] = {
+ /* Front End DAI links */
+ [DAI_LINK_PLAYBACK] = {
+ .name = "rt5650_rt5514 Playback",
+ .stream_name = "rt5650_rt5514 Playback",
+ .cpu_dai_name = "DL1",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ },
+ [DAI_LINK_CAPTURE] = {
+ .name = "rt5650_rt5514 Capture",
+ .stream_name = "rt5650_rt5514 Capture",
+ .cpu_dai_name = "VUL",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ },
+ /* Back End DAI links */
+ [DAI_LINK_CODEC_I2S] = {
+ .name = "Codec",
+ .cpu_dai_name = "I2S",
+ .no_pcm = 1,
+ .codecs = mt8173_rt5650_rt5514_codecs,
+ .num_codecs = 2,
+ .init = mt8173_rt5650_rt5514_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ops = &mt8173_rt5650_rt5514_ops,
+ .ignore_pmdown_time = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+};
+
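+/* prefix the second codec's controls with "Sub" to avoid name clashes */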
+static struct snd_soc_codec_conf mt8173_rt5650_rt5514_codec_conf[] = {
+ {
+ .name_prefix = "Sub",
+ },
+};
+
+static struct snd_soc_card mt8173_rt5650_rt5514_card = {
+ .name = "mtk-rt5650-rt5514",
+ .owner = THIS_MODULE,
+ .dai_link = mt8173_rt5650_rt5514_dais,
+ .num_links = ARRAY_SIZE(mt8173_rt5650_rt5514_dais),
+ .codec_conf = mt8173_rt5650_rt5514_codec_conf,
+ .num_configs = ARRAY_SIZE(mt8173_rt5650_rt5514_codec_conf),
+ .controls = mt8173_rt5650_rt5514_controls,
+ .num_controls = ARRAY_SIZE(mt8173_rt5650_rt5514_controls),
+ .dapm_widgets = mt8173_rt5650_rt5514_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt8173_rt5650_rt5514_widgets),
+ .dapm_routes = mt8173_rt5650_rt5514_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt8173_rt5650_rt5514_routes),
+};
+
+static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &mt8173_rt5650_rt5514_card;
+ struct device_node *platform_node;
+ int i, ret;
+
+ platform_node = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,platform", 0);
+ if (!platform_node) {
+ dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < card->num_links; i++) {
+ if (mt8173_rt5650_rt5514_dais[i].platform_name)
+ continue;
+ mt8173_rt5650_rt5514_dais[i].platform_of_node = platform_node;
+ }
+
+ mt8173_rt5650_rt5514_codecs[0].of_node =
+ of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
+ if (!mt8173_rt5650_rt5514_codecs[0].of_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+ return -EINVAL;
+ }
+ mt8173_rt5650_rt5514_codecs[1].of_node =
+ of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1);
+ if (!mt8173_rt5650_rt5514_codecs[1].of_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+ return -EINVAL;
+ }
+ mt8173_rt5650_rt5514_codec_conf[0].of_node =
+ mt8173_rt5650_rt5514_codecs[1].of_node;
+
+ card->dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret)
+ dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static const struct of_device_id mt8173_rt5650_rt5514_dt_match[] = {
+ { .compatible = "mediatek,mt8173-rt5650-rt5514", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mt8173_rt5650_rt5514_dt_match);
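+
+/*
+ * Hypothetical matching device tree node (labels are placeholders); the
+ * second "mediatek,audio-codec" phandle is the RT5514:
+ *
+ *	sound {
+ *		compatible = "mediatek,mt8173-rt5650-rt5514";
+ *		mediatek,platform = <&afe>;
+ *		mediatek,audio-codec = <&rt5650 &rt5514>;
+ *	};
+ */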
+
+static struct platform_driver mt8173_rt5650_rt5514_driver = {
+ .driver = {
+ .name = "mtk-rt5650-rt5514",
+ .of_match_table = mt8173_rt5650_rt5514_dt_match,
+#ifdef CONFIG_PM
+ .pm = &snd_soc_pm_ops,
+#endif
+ },
+ .probe = mt8173_rt5650_rt5514_dev_probe,
+};
+
+module_platform_driver(mt8173_rt5650_rt5514_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("MT8173 RT5650 and RT5514 SoC machine driver");
+MODULE_AUTHOR("Koro Chen <koro.chen@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mtk-rt5650-rt5514");
+
--- /dev/null
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
+/*
+ * mt8173-rt5650-rt5676.c -- MT8173 machine driver with RT5650/5676 codecs
+ *
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Koro Chen <koro.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include "../../codecs/rt5645.h"
+#include "../../codecs/rt5677.h"
+
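+/* Both codecs are clocked from a shared 12.288 MHz MCLK */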
+#define MCLK_FOR_CODECS 12288000
+
+static const struct snd_soc_dapm_widget mt8173_rt5650_rt5676_widgets[] = {
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_MIC("Int Mic", NULL),
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route mt8173_rt5650_rt5676_routes[] = {
+ {"Speaker", NULL, "SPOL"},
+ {"Speaker", NULL, "SPOR"},
+ {"Speaker", NULL, "Sub AIF2TX"}, /* IF2 ADC to 5650 */
+ {"Sub DMIC L1", NULL, "Int Mic"}, /* DMIC from 5676 */
+ {"Sub DMIC R1", NULL, "Int Mic"},
+ {"Headphone", NULL, "HPOL"},
+ {"Headphone", NULL, "HPOR"},
+ {"Headphone", NULL, "Sub AIF2TX"}, /* IF2 ADC to 5650 */
+ {"Headset Mic", NULL, "micbias1"},
+ {"Headset Mic", NULL, "micbias2"},
+ {"IN1P", NULL, "Headset Mic"},
+ {"IN1N", NULL, "Headset Mic"},
+ {"Sub AIF2RX", NULL, "Headset Mic"}, /* IF2 DAC from 5650 */
+};
+
+static const struct snd_kcontrol_new mt8173_rt5650_rt5676_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+};
+
+static int mt8173_rt5650_rt5676_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int i, ret;
+
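+ /* Run each codec's PLL from MCLK and take sysclk = 512 * fs from the PLL output */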
+ for (i = 0; i < rtd->num_codecs; i++) {
+ struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
+
+ /* pll from mclk 12.288M */
+ ret = snd_soc_dai_set_pll(codec_dai, 0, 0, MCLK_FOR_CODECS,
+ params_rate(params) * 512);
+ if (ret)
+ return ret;
+
+ /* sysclk from pll */
+ ret = snd_soc_dai_set_sysclk(codec_dai, 1,
+ params_rate(params) * 512,
+ SND_SOC_CLOCK_IN);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static struct snd_soc_ops mt8173_rt5650_rt5676_ops = {
+ .hw_params = mt8173_rt5650_rt5676_hw_params,
+};
+
+static struct snd_soc_jack mt8173_rt5650_rt5676_jack;
+
+static int mt8173_rt5650_rt5676_init(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_soc_card *card = runtime->card;
+ struct snd_soc_codec *codec = runtime->codec_dais[0]->codec;
+ struct snd_soc_codec *codec_sub = runtime->codec_dais[1]->codec;
+ int ret;
+
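+ /* Clock the codec filters from the I2S1/I2S2-tracking ASRC sources */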
+ rt5645_sel_asrc_clk_src(codec,
+ RT5645_DA_STEREO_FILTER |
+ RT5645_AD_STEREO_FILTER,
+ RT5645_CLK_SEL_I2S1_ASRC);
+ rt5677_sel_asrc_clk_src(codec_sub,
+ RT5677_DA_STEREO_FILTER |
+ RT5677_AD_STEREO1_FILTER,
+ RT5677_CLK_SEL_I2S1_ASRC);
+ rt5677_sel_asrc_clk_src(codec_sub,
+ RT5677_AD_STEREO2_FILTER |
+ RT5677_I2S2_SOURCE,
+ RT5677_CLK_SEL_I2S2_ASRC);
+
+ /* enable jack detection */
+ ret = snd_soc_card_jack_new(card, "Headset Jack",
+ SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ &mt8173_rt5650_rt5676_jack, NULL, 0);
+ if (ret) {
+ dev_err(card->dev, "Can't new Headset Jack %d\n", ret);
+ return ret;
+ }
+
+ return rt5645_set_jack_detect(codec,
+ &mt8173_rt5650_rt5676_jack,
+ &mt8173_rt5650_rt5676_jack,
+ &mt8173_rt5650_rt5676_jack);
+}
+
+static struct snd_soc_dai_link_component mt8173_rt5650_rt5676_codecs[] = {
+ {
+ .dai_name = "rt5645-aif1",
+ },
+ {
+ .dai_name = "rt5677-aif1",
+ },
+};
+
+enum {
+ DAI_LINK_PLAYBACK,
+ DAI_LINK_CAPTURE,
+ DAI_LINK_HDMI,
+ DAI_LINK_CODEC_I2S,
+ DAI_LINK_HDMI_I2S,
+ DAI_LINK_INTERCODEC
+};
+
+/* Digital audio interface glue - connects codec <---> CPU */
+static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
+ /* Front End DAI links */
+ [DAI_LINK_PLAYBACK] = {
+ .name = "rt5650_rt5676 Playback",
+ .stream_name = "rt5650_rt5676 Playback",
+ .cpu_dai_name = "DL1",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ },
+ [DAI_LINK_CAPTURE] = {
+ .name = "rt5650_rt5676 Capture",
+ .stream_name = "rt5650_rt5676 Capture",
+ .cpu_dai_name = "VUL",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_HDMI] = {
+ .name = "HDMI",
+ .stream_name = "HDMI PCM",
+ .cpu_dai_name = "HDMI",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ },
+
+ /* Back End DAI links */
+ [DAI_LINK_CODEC_I2S] = {
+ .name = "Codec",
+ .cpu_dai_name = "I2S",
+ .no_pcm = 1,
+ .codecs = mt8173_rt5650_rt5676_codecs,
+ .num_codecs = 2,
+ .init = mt8173_rt5650_rt5676_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ops = &mt8173_rt5650_rt5676_ops,
+ .ignore_pmdown_time = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_HDMI_I2S] = {
+ .name = "HDMI BE",
+ .cpu_dai_name = "HDMIO",
+ .no_pcm = 1,
+ .codec_dai_name = "i2s-hifi",
+ .dpcm_playback = 1,
+ },
+ /* rt5676 <-> rt5650 intercodec link: Sets rt5676 I2S2 as master */
+ [DAI_LINK_INTERCODEC] = {
+ .name = "rt5650_rt5676 intercodec",
+ .stream_name = "rt5650_rt5676 intercodec",
+ .cpu_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "snd-soc-dummy",
+ .no_pcm = 1,
+ .codec_dai_name = "rt5677-aif2",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
+ },
+
+};
+
+static struct snd_soc_codec_conf mt8173_rt5650_rt5676_codec_conf[] = {
+ {
+ .name_prefix = "Sub",
+ },
+};
+
+static struct snd_soc_card mt8173_rt5650_rt5676_card = {
+ .name = "mtk-rt5650-rt5676",
+ .owner = THIS_MODULE,
+ .dai_link = mt8173_rt5650_rt5676_dais,
+ .num_links = ARRAY_SIZE(mt8173_rt5650_rt5676_dais),
+ .codec_conf = mt8173_rt5650_rt5676_codec_conf,
+ .num_configs = ARRAY_SIZE(mt8173_rt5650_rt5676_codec_conf),
+ .controls = mt8173_rt5650_rt5676_controls,
+ .num_controls = ARRAY_SIZE(mt8173_rt5650_rt5676_controls),
+ .dapm_widgets = mt8173_rt5650_rt5676_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt8173_rt5650_rt5676_widgets),
+ .dapm_routes = mt8173_rt5650_rt5676_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt8173_rt5650_rt5676_routes),
+};
+
+static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &mt8173_rt5650_rt5676_card;
+ struct device_node *platform_node;
+ int i, ret;
+
+ platform_node = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,platform", 0);
+ if (!platform_node) {
+ dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < card->num_links; i++) {
+ if (mt8173_rt5650_rt5676_dais[i].platform_name)
+ continue;
+ mt8173_rt5650_rt5676_dais[i].platform_of_node = platform_node;
+ }
+
+ mt8173_rt5650_rt5676_codecs[0].of_node =
+ of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
+ if (!mt8173_rt5650_rt5676_codecs[0].of_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+ return -EINVAL;
+ }
+ mt8173_rt5650_rt5676_codecs[1].of_node =
+ of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1);
+ if (!mt8173_rt5650_rt5676_codecs[1].of_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+ return -EINVAL;
+ }
+ mt8173_rt5650_rt5676_codec_conf[0].of_node =
+ mt8173_rt5650_rt5676_codecs[1].of_node;
+
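+ /* The intercodec link targets rt5677 AIF2, so it reuses the rt5677 node */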
+ mt8173_rt5650_rt5676_dais[DAI_LINK_INTERCODEC].codec_of_node =
+ mt8173_rt5650_rt5676_codecs[1].of_node;
+
+ mt8173_rt5650_rt5676_dais[DAI_LINK_HDMI_I2S].codec_of_node =
+ of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 2);
+ if (!mt8173_rt5650_rt5676_dais[DAI_LINK_HDMI_I2S].codec_of_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+ return -EINVAL;
+ }
+
+ card->dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret)
+ dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static const struct of_device_id mt8173_rt5650_rt5676_dt_match[] = {
+ { .compatible = "mediatek,mt8173-rt5650-rt5676", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mt8173_rt5650_rt5676_dt_match);
+
+static struct platform_driver mt8173_rt5650_rt5676_driver = {
+ .driver = {
+ .name = "mtk-rt5650-rt5676",
+ .of_match_table = mt8173_rt5650_rt5676_dt_match,
+#ifdef CONFIG_PM
+ .pm = &snd_soc_pm_ops,
+#endif
+ },
+ .probe = mt8173_rt5650_rt5676_dev_probe,
+};
+
+module_platform_driver(mt8173_rt5650_rt5676_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("MT8173 RT5650 and RT5676 SoC machine driver");
+MODULE_AUTHOR("Koro Chen <koro.chen@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mtk-rt5650-rt5676");
+
--- /dev/null
+/*
+ * mt8173-rt5650.c -- MT8173 machine driver with RT5650 codecs
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Koro Chen <koro.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include "../../codecs/rt5645.h"
+
+#define MCLK_FOR_CODECS 12288000
+
+enum mt8173_rt5650_mclk {
+ MT8173_RT5650_MCLK_EXTERNAL = 0,
+ MT8173_RT5650_MCLK_INTERNAL,
+};
+
+struct mt8173_rt5650_platform_data {
+ enum mt8173_rt5650_mclk pll_from;
+ /* 0 = external oscillator; 1 = internal source from mt8173 */
+};
+
+static struct mt8173_rt5650_platform_data mt8173_rt5650_priv = {
+ .pll_from = MT8173_RT5650_MCLK_EXTERNAL,
+};
+
+static const struct snd_soc_dapm_widget mt8173_rt5650_widgets[] = {
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_MIC("Int Mic", NULL),
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route mt8173_rt5650_routes[] = {
+ {"Speaker", NULL, "SPOL"},
+ {"Speaker", NULL, "SPOR"},
+ {"DMIC L1", NULL, "Int Mic"},
+ {"DMIC R1", NULL, "Int Mic"},
+ {"Headphone", NULL, "HPOL"},
+ {"Headphone", NULL, "HPOR"},
+ {"Headset Mic", NULL, "micbias1"},
+ {"Headset Mic", NULL, "micbias2"},
+ {"IN1P", NULL, "Headset Mic"},
+ {"IN1N", NULL, "Headset Mic"},
+};
+
+static const struct snd_kcontrol_new mt8173_rt5650_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+};
+
+static int mt8173_rt5650_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ unsigned int mclk_clock;
+ int i, ret;
+
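+ /* Choose the PLL reference: fixed 12.288 MHz MCLK or a 256 * fs clock from mt8173 */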
+ switch (mt8173_rt5650_priv.pll_from) {
+ case MT8173_RT5650_MCLK_EXTERNAL:
+ /* mclk = 12.288M */
+ mclk_clock = MCLK_FOR_CODECS;
+ break;
+ case MT8173_RT5650_MCLK_INTERNAL:
+ /* mclk = sampling rate*256 */
+ mclk_clock = params_rate(params) * 256;
+ break;
+ default:
+ /* mclk = 12.288M */
+ mclk_clock = MCLK_FOR_CODECS;
+ break;
+ }
+
+ for (i = 0; i < rtd->num_codecs; i++) {
+ struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
+
+ /* pll from mclk */
+ ret = snd_soc_dai_set_pll(codec_dai, 0, 0, mclk_clock,
+ params_rate(params) * 512);
+ if (ret)
+ return ret;
+
+ /* sysclk from pll */
+ ret = snd_soc_dai_set_sysclk(codec_dai, 1,
+ params_rate(params) * 512,
+ SND_SOC_CLOCK_IN);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static struct snd_soc_ops mt8173_rt5650_ops = {
+ .hw_params = mt8173_rt5650_hw_params,
+};
+
+static struct snd_soc_jack mt8173_rt5650_jack;
+
+static int mt8173_rt5650_init(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_soc_card *card = runtime->card;
+ struct snd_soc_codec *codec = runtime->codec_dais[0]->codec;
+ const char *codec_capture_dai = runtime->codec_dais[1]->name;
+ int ret;
+
+ rt5645_sel_asrc_clk_src(codec,
+ RT5645_DA_STEREO_FILTER,
+ RT5645_CLK_SEL_I2S1_ASRC);
+
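+ /* Pick the AD ASRC source to match whichever AIF captures (I2S1 for aif1, I2S2 for aif2) */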
+ if (!strcmp(codec_capture_dai, "rt5645-aif1")) {
+ rt5645_sel_asrc_clk_src(codec,
+ RT5645_AD_STEREO_FILTER,
+ RT5645_CLK_SEL_I2S1_ASRC);
+ } else if (!strcmp(codec_capture_dai, "rt5645-aif2")) {
+ rt5645_sel_asrc_clk_src(codec,
+ RT5645_AD_STEREO_FILTER,
+ RT5645_CLK_SEL_I2S2_ASRC);
+ } else {
+ dev_warn(card->dev,
+ "Only one dai codec found in DTS, enabled rt5645 AD filter\n");
+ rt5645_sel_asrc_clk_src(codec,
+ RT5645_AD_STEREO_FILTER,
+ RT5645_CLK_SEL_I2S1_ASRC);
+ }
+
+ /* enable jack detection */
+ ret = snd_soc_card_jack_new(card, "Headset Jack",
+ SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ &mt8173_rt5650_jack, NULL, 0);
+ if (ret) {
+ dev_err(card->dev, "Can't new Headset Jack %d\n", ret);
+ return ret;
+ }
+
+ return rt5645_set_jack_detect(codec,
+ &mt8173_rt5650_jack,
+ &mt8173_rt5650_jack,
+ &mt8173_rt5650_jack);
+}
+
+static struct snd_soc_dai_link_component mt8173_rt5650_codecs[] = {
+ {
+ /* Playback */
+ .dai_name = "rt5645-aif1",
+ },
+ {
+ /* Capture */
+ .dai_name = "rt5645-aif1",
+ },
+};
+
+enum {
+ DAI_LINK_PLAYBACK,
+ DAI_LINK_CAPTURE,
+ DAI_LINK_HDMI,
+ DAI_LINK_CODEC_I2S,
+ DAI_LINK_HDMI_I2S,
+};
+
+/* Digital audio interface glue - connects codec <---> CPU */
+static struct snd_soc_dai_link mt8173_rt5650_dais[] = {
+ /* Front End DAI links */
+ [DAI_LINK_PLAYBACK] = {
+ .name = "rt5650 Playback",
+ .stream_name = "rt5650 Playback",
+ .cpu_dai_name = "DL1",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ },
+ [DAI_LINK_CAPTURE] = {
+ .name = "rt5650 Capture",
+ .stream_name = "rt5650 Capture",
+ .cpu_dai_name = "VUL",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_HDMI] = {
+ .name = "HDMI",
+ .stream_name = "HDMI PCM",
+ .cpu_dai_name = "HDMI",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ },
+ /* Back End DAI links */
+ [DAI_LINK_CODEC_I2S] = {
+ .name = "Codec",
+ .cpu_dai_name = "I2S",
+ .no_pcm = 1,
+ .codecs = mt8173_rt5650_codecs,
+ .num_codecs = 2,
+ .init = mt8173_rt5650_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ops = &mt8173_rt5650_ops,
+ .ignore_pmdown_time = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_HDMI_I2S] = {
+ .name = "HDMI BE",
+ .cpu_dai_name = "HDMIO",
+ .no_pcm = 1,
+ .codec_dai_name = "i2s-hifi",
+ .dpcm_playback = 1,
+ },
+};
+
+static struct snd_soc_card mt8173_rt5650_card = {
+ .name = "mtk-rt5650",
+ .owner = THIS_MODULE,
+ .dai_link = mt8173_rt5650_dais,
+ .num_links = ARRAY_SIZE(mt8173_rt5650_dais),
+ .controls = mt8173_rt5650_controls,
+ .num_controls = ARRAY_SIZE(mt8173_rt5650_controls),
+ .dapm_widgets = mt8173_rt5650_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt8173_rt5650_widgets),
+ .dapm_routes = mt8173_rt5650_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt8173_rt5650_routes),
+};
+
+static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &mt8173_rt5650_card;
+ struct device_node *platform_node;
+ struct device_node *np;
+ const char *codec_capture_dai;
+ int i, ret;
+
+ platform_node = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,platform", 0);
+ if (!platform_node) {
+ dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < card->num_links; i++) {
+ if (mt8173_rt5650_dais[i].platform_name)
+ continue;
+ mt8173_rt5650_dais[i].platform_of_node = platform_node;
+ }
+
+ mt8173_rt5650_codecs[0].of_node =
+ of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
+ if (!mt8173_rt5650_codecs[0].of_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+ return -EINVAL;
+ }
+ mt8173_rt5650_codecs[1].of_node = mt8173_rt5650_codecs[0].of_node;
+
+ np = of_get_child_by_name(pdev->dev.of_node, "codec-capture");
+ if (np) {
+ ret = snd_soc_of_get_dai_name(np, &codec_capture_dai);
+ of_node_put(np);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "%s codec_capture_dai name fail %d\n",
+ __func__, ret);
+ return ret;
+ }
+ mt8173_rt5650_codecs[1].dai_name = codec_capture_dai;
+ }
+
+ if (device_property_present(&pdev->dev, "mediatek,mclk")) {
+ ret = device_property_read_u32(&pdev->dev,
+ "mediatek,mclk",
+ &mt8173_rt5650_priv.pll_from);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s snd_soc_register_card fail %d\n",
+ __func__, ret);
+ }
+ }
+
+ mt8173_rt5650_dais[DAI_LINK_HDMI_I2S].codec_of_node =
+ of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1);
+ if (!mt8173_rt5650_dais[DAI_LINK_HDMI_I2S].codec_of_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+ return -EINVAL;
+ }
+ card->dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret)
+ dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static const struct of_device_id mt8173_rt5650_dt_match[] = {
+ { .compatible = "mediatek,mt8173-rt5650", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mt8173_rt5650_dt_match);
+
+static struct platform_driver mt8173_rt5650_driver = {
+ .driver = {
+ .name = "mtk-rt5650",
+ .of_match_table = mt8173_rt5650_dt_match,
+#ifdef CONFIG_PM
+ .pm = &snd_soc_pm_ops,
+#endif
+ },
+ .probe = mt8173_rt5650_dev_probe,
+};
+
+module_platform_driver(mt8173_rt5650_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("MT8173 RT5650 SoC machine driver");
+MODULE_AUTHOR("Koro Chen <koro.chen@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mtk-rt5650");
+
+++ /dev/null
-/*
- * mtk_afe_common.h -- Mediatek audio driver common definitions
- *
- * Copyright (c) 2015 MediaTek Inc.
- * Author: Koro Chen <koro.chen@mediatek.com>
- * Sascha Hauer <s.hauer@pengutronix.de>
- * Hidalgo Huang <hidalgo.huang@mediatek.com>
- * Ir Lian <ir.lian@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _MTK_AFE_COMMON_H_
-#define _MTK_AFE_COMMON_H_
-
-#include <linux/clk.h>
-#include <linux/regmap.h>
-
-enum {
- MTK_AFE_MEMIF_DL1,
- MTK_AFE_MEMIF_DL2,
- MTK_AFE_MEMIF_VUL,
- MTK_AFE_MEMIF_DAI,
- MTK_AFE_MEMIF_AWB,
- MTK_AFE_MEMIF_MOD_DAI,
- MTK_AFE_MEMIF_HDMI,
- MTK_AFE_MEMIF_NUM,
- MTK_AFE_IO_MOD_PCM1 = MTK_AFE_MEMIF_NUM,
- MTK_AFE_IO_MOD_PCM2,
- MTK_AFE_IO_PMIC,
- MTK_AFE_IO_I2S,
- MTK_AFE_IO_2ND_I2S,
- MTK_AFE_IO_HW_GAIN1,
- MTK_AFE_IO_HW_GAIN2,
- MTK_AFE_IO_MRG_O,
- MTK_AFE_IO_MRG_I,
- MTK_AFE_IO_DAIBT,
- MTK_AFE_IO_HDMI,
-};
-
-enum {
- MTK_AFE_IRQ_1,
- MTK_AFE_IRQ_2,
- MTK_AFE_IRQ_3,
- MTK_AFE_IRQ_4,
- MTK_AFE_IRQ_5,
- MTK_AFE_IRQ_6,
- MTK_AFE_IRQ_7,
- MTK_AFE_IRQ_8,
- MTK_AFE_IRQ_NUM,
-};
-
-enum {
- MTK_CLK_INFRASYS_AUD,
- MTK_CLK_TOP_PDN_AUD,
- MTK_CLK_TOP_PDN_AUD_BUS,
- MTK_CLK_I2S0_M,
- MTK_CLK_I2S1_M,
- MTK_CLK_I2S2_M,
- MTK_CLK_I2S3_M,
- MTK_CLK_I2S3_B,
- MTK_CLK_BCK0,
- MTK_CLK_BCK1,
- MTK_CLK_NUM
-};
-
-struct mtk_afe;
-struct snd_pcm_substream;
-
-struct mtk_afe_memif_data {
- int id;
- const char *name;
- int reg_ofs_base;
- int reg_ofs_cur;
- int fs_shift;
- int mono_shift;
- int enable_shift;
- int irq_reg_cnt;
- int irq_cnt_shift;
- int irq_en_shift;
- int irq_fs_shift;
- int irq_clr_shift;
- int msb_shift;
-};
-
-struct mtk_afe_memif {
- unsigned int phys_buf_addr;
- int buffer_size;
- struct snd_pcm_substream *substream;
- const struct mtk_afe_memif_data *data;
- const struct mtk_afe_irq_data *irqdata;
-};
-
-#endif
+++ /dev/null
-/*
- * Mediatek ALSA SoC AFE platform driver
- *
- * Copyright (c) 2015 MediaTek Inc.
- * Author: Koro Chen <koro.chen@mediatek.com>
- * Sascha Hauer <s.hauer@pengutronix.de>
- * Hidalgo Huang <hidalgo.huang@mediatek.com>
- * Ir Lian <ir.lian@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/dma-mapping.h>
-#include <linux/pm_runtime.h>
-#include <sound/soc.h>
-#include "mtk-afe-common.h"
-
-/*****************************************************************************
- * R E G I S T E R D E F I N I T I O N
- *****************************************************************************/
-#define AUDIO_TOP_CON0 0x0000
-#define AUDIO_TOP_CON1 0x0004
-#define AFE_DAC_CON0 0x0010
-#define AFE_DAC_CON1 0x0014
-#define AFE_I2S_CON1 0x0034
-#define AFE_I2S_CON2 0x0038
-#define AFE_CONN_24BIT 0x006c
-#define AFE_MEMIF_MSB 0x00cc
-
-#define AFE_CONN1 0x0024
-#define AFE_CONN2 0x0028
-#define AFE_CONN3 0x002c
-#define AFE_CONN7 0x0460
-#define AFE_CONN8 0x0464
-#define AFE_HDMI_CONN0 0x0390
-
-/* Memory interface */
-#define AFE_DL1_BASE 0x0040
-#define AFE_DL1_CUR 0x0044
-#define AFE_DL1_END 0x0048
-#define AFE_DL2_BASE 0x0050
-#define AFE_DL2_CUR 0x0054
-#define AFE_AWB_BASE 0x0070
-#define AFE_AWB_CUR 0x007c
-#define AFE_VUL_BASE 0x0080
-#define AFE_VUL_CUR 0x008c
-#define AFE_VUL_END 0x0088
-#define AFE_DAI_BASE 0x0090
-#define AFE_DAI_CUR 0x009c
-#define AFE_MOD_PCM_BASE 0x0330
-#define AFE_MOD_PCM_CUR 0x033c
-#define AFE_HDMI_OUT_BASE 0x0374
-#define AFE_HDMI_OUT_CUR 0x0378
-#define AFE_HDMI_OUT_END 0x037c
-
-#define AFE_ADDA_TOP_CON0 0x0120
-#define AFE_ADDA2_TOP_CON0 0x0600
-
-#define AFE_HDMI_OUT_CON0 0x0370
-
-#define AFE_IRQ_MCU_CON 0x03a0
-#define AFE_IRQ_STATUS 0x03a4
-#define AFE_IRQ_CLR 0x03a8
-#define AFE_IRQ_CNT1 0x03ac
-#define AFE_IRQ_CNT2 0x03b0
-#define AFE_IRQ_MCU_EN 0x03b4
-#define AFE_IRQ_CNT5 0x03bc
-#define AFE_IRQ_CNT7 0x03dc
-
-#define AFE_TDM_CON1 0x0548
-#define AFE_TDM_CON2 0x054c
-
-#define AFE_BASE_END_OFFSET 8
-#define AFE_IRQ_STATUS_BITS 0xff
-
-/* AUDIO_TOP_CON0 (0x0000) */
-#define AUD_TCON0_PDN_SPDF (0x1 << 21)
-#define AUD_TCON0_PDN_HDMI (0x1 << 20)
-#define AUD_TCON0_PDN_24M (0x1 << 9)
-#define AUD_TCON0_PDN_22M (0x1 << 8)
-#define AUD_TCON0_PDN_AFE (0x1 << 2)
-
-/* AFE_I2S_CON1 (0x0034) */
-#define AFE_I2S_CON1_LOW_JITTER_CLK (0x1 << 12)
-#define AFE_I2S_CON1_RATE(x) (((x) & 0xf) << 8)
-#define AFE_I2S_CON1_FORMAT_I2S (0x1 << 3)
-#define AFE_I2S_CON1_EN (0x1 << 0)
-
-/* AFE_I2S_CON2 (0x0038) */
-#define AFE_I2S_CON2_LOW_JITTER_CLK (0x1 << 12)
-#define AFE_I2S_CON2_RATE(x) (((x) & 0xf) << 8)
-#define AFE_I2S_CON2_FORMAT_I2S (0x1 << 3)
-#define AFE_I2S_CON2_EN (0x1 << 0)
-
-/* AFE_CONN_24BIT (0x006c) */
-#define AFE_CONN_24BIT_O04 (0x1 << 4)
-#define AFE_CONN_24BIT_O03 (0x1 << 3)
-
-/* AFE_HDMI_CONN0 (0x0390) */
-#define AFE_HDMI_CONN0_O37_I37 (0x7 << 21)
-#define AFE_HDMI_CONN0_O36_I36 (0x6 << 18)
-#define AFE_HDMI_CONN0_O35_I33 (0x3 << 15)
-#define AFE_HDMI_CONN0_O34_I32 (0x2 << 12)
-#define AFE_HDMI_CONN0_O33_I35 (0x5 << 9)
-#define AFE_HDMI_CONN0_O32_I34 (0x4 << 6)
-#define AFE_HDMI_CONN0_O31_I31 (0x1 << 3)
-#define AFE_HDMI_CONN0_O30_I30 (0x0 << 0)
-
-/* AFE_TDM_CON1 (0x0548) */
-#define AFE_TDM_CON1_LRCK_WIDTH(x) (((x) - 1) << 24)
-#define AFE_TDM_CON1_32_BCK_CYCLES (0x2 << 12)
-#define AFE_TDM_CON1_WLEN_32BIT (0x2 << 8)
-#define AFE_TDM_CON1_MSB_ALIGNED (0x1 << 4)
-#define AFE_TDM_CON1_1_BCK_DELAY (0x1 << 3)
-#define AFE_TDM_CON1_LRCK_INV (0x1 << 2)
-#define AFE_TDM_CON1_BCK_INV (0x1 << 1)
-#define AFE_TDM_CON1_EN (0x1 << 0)
-
-enum afe_tdm_ch_start {
- AFE_TDM_CH_START_O30_O31 = 0,
- AFE_TDM_CH_START_O32_O33,
- AFE_TDM_CH_START_O34_O35,
- AFE_TDM_CH_START_O36_O37,
- AFE_TDM_CH_ZERO,
-};
-
-static const unsigned int mtk_afe_backup_list[] = {
- AUDIO_TOP_CON0,
- AFE_CONN1,
- AFE_CONN2,
- AFE_CONN7,
- AFE_CONN8,
- AFE_DAC_CON1,
- AFE_DL1_BASE,
- AFE_DL1_END,
- AFE_VUL_BASE,
- AFE_VUL_END,
- AFE_HDMI_OUT_BASE,
- AFE_HDMI_OUT_END,
- AFE_HDMI_CONN0,
- AFE_DAC_CON0,
-};
-
-struct mtk_afe {
- /* address for ioremap audio hardware register */
- void __iomem *base_addr;
- struct device *dev;
- struct regmap *regmap;
- struct mtk_afe_memif memif[MTK_AFE_MEMIF_NUM];
- struct clk *clocks[MTK_CLK_NUM];
- unsigned int backup_regs[ARRAY_SIZE(mtk_afe_backup_list)];
- bool suspended;
-};
-
-static const struct snd_pcm_hardware mtk_afe_hardware = {
- .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_MMAP_VALID),
- .buffer_bytes_max = 256 * 1024,
- .period_bytes_min = 512,
- .period_bytes_max = 128 * 1024,
- .periods_min = 2,
- .periods_max = 256,
- .fifo_size = 0,
-};
-
-static snd_pcm_uframes_t mtk_afe_pcm_pointer
- (struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
- unsigned int hw_ptr;
- int ret;
-
- ret = regmap_read(afe->regmap, memif->data->reg_ofs_cur, &hw_ptr);
- if (ret || hw_ptr == 0) {
- dev_err(afe->dev, "%s hw_ptr err\n", __func__);
- hw_ptr = memif->phys_buf_addr;
- }
-
- return bytes_to_frames(substream->runtime,
- hw_ptr - memif->phys_buf_addr);
-}
-
-static const struct snd_pcm_ops mtk_afe_pcm_ops = {
- .ioctl = snd_pcm_lib_ioctl,
- .pointer = mtk_afe_pcm_pointer,
-};
-
-static int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
-{
- size_t size;
- struct snd_card *card = rtd->card->snd_card;
- struct snd_pcm *pcm = rtd->pcm;
-
- size = mtk_afe_hardware.buffer_bytes_max;
-
- return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- card->dev, size, size);
-}
-
-static void mtk_afe_pcm_free(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
-static const struct snd_soc_platform_driver mtk_afe_pcm_platform = {
- .ops = &mtk_afe_pcm_ops,
- .pcm_new = mtk_afe_pcm_new,
- .pcm_free = mtk_afe_pcm_free,
-};
-
-struct mtk_afe_rate {
- unsigned int rate;
- unsigned int regvalue;
-};
-
-static const struct mtk_afe_rate mtk_afe_i2s_rates[] = {
- { .rate = 8000, .regvalue = 0 },
- { .rate = 11025, .regvalue = 1 },
- { .rate = 12000, .regvalue = 2 },
- { .rate = 16000, .regvalue = 4 },
- { .rate = 22050, .regvalue = 5 },
- { .rate = 24000, .regvalue = 6 },
- { .rate = 32000, .regvalue = 8 },
- { .rate = 44100, .regvalue = 9 },
- { .rate = 48000, .regvalue = 10 },
- { .rate = 88000, .regvalue = 11 },
- { .rate = 96000, .regvalue = 12 },
- { .rate = 174000, .regvalue = 13 },
- { .rate = 192000, .regvalue = 14 },
-};
-
-static int mtk_afe_i2s_fs(unsigned int sample_rate)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mtk_afe_i2s_rates); i++)
- if (mtk_afe_i2s_rates[i].rate == sample_rate)
- return mtk_afe_i2s_rates[i].regvalue;
-
- return -EINVAL;
-}
-
-static int mtk_afe_set_i2s(struct mtk_afe *afe, unsigned int rate)
-{
- unsigned int val;
- int fs = mtk_afe_i2s_fs(rate);
-
- if (fs < 0)
- return -EINVAL;
-
- /* from external ADC */
- regmap_update_bits(afe->regmap, AFE_ADDA_TOP_CON0, 0x1, 0x1);
- regmap_update_bits(afe->regmap, AFE_ADDA2_TOP_CON0, 0x1, 0x1);
-
- /* set input */
- val = AFE_I2S_CON2_LOW_JITTER_CLK |
- AFE_I2S_CON2_RATE(fs) |
- AFE_I2S_CON2_FORMAT_I2S;
-
- regmap_update_bits(afe->regmap, AFE_I2S_CON2, ~AFE_I2S_CON2_EN, val);
-
- /* set output */
- val = AFE_I2S_CON1_LOW_JITTER_CLK |
- AFE_I2S_CON1_RATE(fs) |
- AFE_I2S_CON1_FORMAT_I2S;
-
- regmap_update_bits(afe->regmap, AFE_I2S_CON1, ~AFE_I2S_CON1_EN, val);
- return 0;
-}
-
-static void mtk_afe_set_i2s_enable(struct mtk_afe *afe, bool enable)
-{
- unsigned int val;
-
- regmap_read(afe->regmap, AFE_I2S_CON2, &val);
- if (!!(val & AFE_I2S_CON2_EN) == enable)
- return;
-
- /* input */
- regmap_update_bits(afe->regmap, AFE_I2S_CON2, 0x1, enable);
-
- /* output */
- regmap_update_bits(afe->regmap, AFE_I2S_CON1, 0x1, enable);
-}
-
-static int mtk_afe_dais_enable_clks(struct mtk_afe *afe,
- struct clk *m_ck, struct clk *b_ck)
-{
- int ret;
-
- if (m_ck) {
- ret = clk_prepare_enable(m_ck);
- if (ret) {
- dev_err(afe->dev, "Failed to enable m_ck\n");
- return ret;
- }
- }
-
- if (b_ck) {
- ret = clk_prepare_enable(b_ck);
- if (ret) {
- dev_err(afe->dev, "Failed to enable b_ck\n");
- return ret;
- }
- }
- return 0;
-}
-
-static int mtk_afe_dais_set_clks(struct mtk_afe *afe,
- struct clk *m_ck, unsigned int mck_rate,
- struct clk *b_ck, unsigned int bck_rate)
-{
- int ret;
-
- if (m_ck) {
- ret = clk_set_rate(m_ck, mck_rate);
- if (ret) {
- dev_err(afe->dev, "Failed to set m_ck rate\n");
- return ret;
- }
- }
-
- if (b_ck) {
- ret = clk_set_rate(b_ck, bck_rate);
- if (ret) {
- dev_err(afe->dev, "Failed to set b_ck rate\n");
- return ret;
- }
- }
- return 0;
-}
-
-static void mtk_afe_dais_disable_clks(struct mtk_afe *afe,
- struct clk *m_ck, struct clk *b_ck)
-{
- if (m_ck)
- clk_disable_unprepare(m_ck);
- if (b_ck)
- clk_disable_unprepare(b_ck);
-}
-
-static int mtk_afe_i2s_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
-
- if (dai->active)
- return 0;
-
- mtk_afe_dais_enable_clks(afe, afe->clocks[MTK_CLK_I2S1_M], NULL);
- mtk_afe_dais_enable_clks(afe, afe->clocks[MTK_CLK_I2S2_M], NULL);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
- AUD_TCON0_PDN_22M | AUD_TCON0_PDN_24M, 0);
- return 0;
-}
-
-static void mtk_afe_i2s_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
-
- if (dai->active)
- return;
-
- mtk_afe_set_i2s_enable(afe, false);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
- AUD_TCON0_PDN_22M | AUD_TCON0_PDN_24M,
- AUD_TCON0_PDN_22M | AUD_TCON0_PDN_24M);
- mtk_afe_dais_disable_clks(afe, afe->clocks[MTK_CLK_I2S1_M], NULL);
- mtk_afe_dais_disable_clks(afe, afe->clocks[MTK_CLK_I2S2_M], NULL);
-}
-
-static int mtk_afe_i2s_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_pcm_runtime * const runtime = substream->runtime;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- int ret;
-
- mtk_afe_dais_set_clks(afe,
- afe->clocks[MTK_CLK_I2S1_M], runtime->rate * 256,
- NULL, 0);
- mtk_afe_dais_set_clks(afe,
- afe->clocks[MTK_CLK_I2S2_M], runtime->rate * 256,
- NULL, 0);
- /* config I2S */
- ret = mtk_afe_set_i2s(afe, substream->runtime->rate);
- if (ret)
- return ret;
-
- mtk_afe_set_i2s_enable(afe, true);
-
- return 0;
-}
-
-static int mtk_afe_hdmi_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
-
- if (dai->active)
- return 0;
-
- mtk_afe_dais_enable_clks(afe, afe->clocks[MTK_CLK_I2S3_M],
- afe->clocks[MTK_CLK_I2S3_B]);
- return 0;
-}
-
-static void mtk_afe_hdmi_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
-
- if (dai->active)
- return;
-
- mtk_afe_dais_disable_clks(afe, afe->clocks[MTK_CLK_I2S3_M],
- afe->clocks[MTK_CLK_I2S3_B]);
-}
-
-static int mtk_afe_hdmi_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_pcm_runtime * const runtime = substream->runtime;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- unsigned int val;
-
- mtk_afe_dais_set_clks(afe,
- afe->clocks[MTK_CLK_I2S3_M], runtime->rate * 128,
- afe->clocks[MTK_CLK_I2S3_B],
- runtime->rate * runtime->channels * 32);
-
- val = AFE_TDM_CON1_BCK_INV |
- AFE_TDM_CON1_LRCK_INV |
- AFE_TDM_CON1_1_BCK_DELAY |
- AFE_TDM_CON1_MSB_ALIGNED | /* I2S mode */
- AFE_TDM_CON1_WLEN_32BIT |
- AFE_TDM_CON1_32_BCK_CYCLES |
- AFE_TDM_CON1_LRCK_WIDTH(32);
- regmap_update_bits(afe->regmap, AFE_TDM_CON1, ~AFE_TDM_CON1_EN, val);
-
- /* set tdm2 config */
- switch (runtime->channels) {
- case 1:
- case 2:
- val = AFE_TDM_CH_START_O30_O31;
- val |= (AFE_TDM_CH_ZERO << 4);
- val |= (AFE_TDM_CH_ZERO << 8);
- val |= (AFE_TDM_CH_ZERO << 12);
- break;
- case 3:
- case 4:
- val = AFE_TDM_CH_START_O30_O31;
- val |= (AFE_TDM_CH_START_O32_O33 << 4);
- val |= (AFE_TDM_CH_ZERO << 8);
- val |= (AFE_TDM_CH_ZERO << 12);
- break;
- case 5:
- case 6:
- val = AFE_TDM_CH_START_O30_O31;
- val |= (AFE_TDM_CH_START_O32_O33 << 4);
- val |= (AFE_TDM_CH_START_O34_O35 << 8);
- val |= (AFE_TDM_CH_ZERO << 12);
- break;
- case 7:
- case 8:
- val = AFE_TDM_CH_START_O30_O31;
- val |= (AFE_TDM_CH_START_O32_O33 << 4);
- val |= (AFE_TDM_CH_START_O34_O35 << 8);
- val |= (AFE_TDM_CH_START_O36_O37 << 12);
- break;
- default:
- val = 0;
- }
- regmap_update_bits(afe->regmap, AFE_TDM_CON2, 0x0000ffff, val);
-
- regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0,
- 0x000000f0, runtime->channels << 4);
- return 0;
-}
-
-static int mtk_afe_hdmi_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
-
- dev_info(afe->dev, "%s cmd=%d %s\n", __func__, cmd, dai->name);
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
- AUD_TCON0_PDN_HDMI | AUD_TCON0_PDN_SPDF, 0);
-
- /* set connections: O30~O37: L/R/LS/RS/C/LFE/CH7/CH8 */
- regmap_write(afe->regmap, AFE_HDMI_CONN0,
- AFE_HDMI_CONN0_O30_I30 | AFE_HDMI_CONN0_O31_I31 |
- AFE_HDMI_CONN0_O32_I34 | AFE_HDMI_CONN0_O33_I35 |
- AFE_HDMI_CONN0_O34_I32 | AFE_HDMI_CONN0_O35_I33 |
- AFE_HDMI_CONN0_O36_I36 | AFE_HDMI_CONN0_O37_I37);
-
- /* enable Out control */
- regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, 0x1, 0x1);
-
- /* enable tdm */
- regmap_update_bits(afe->regmap, AFE_TDM_CON1, 0x1, 0x1);
-
- return 0;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- /* disable tdm */
- regmap_update_bits(afe->regmap, AFE_TDM_CON1, 0x1, 0);
-
- /* disable Out control */
- regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, 0x1, 0);
-
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
- AUD_TCON0_PDN_HDMI | AUD_TCON0_PDN_SPDF,
- AUD_TCON0_PDN_HDMI | AUD_TCON0_PDN_SPDF);
-
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static int mtk_afe_dais_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
- int ret;
-
- memif->substream = substream;
-
- snd_soc_set_runtime_hwparams(substream, &mtk_afe_hardware);
-
- /*
- * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
- * smaller than period_size due to AFE's internal buffer.
- * This easily leads to overrun when avail_min is period_size.
- * One more period can hold the possible unread buffer.
- */
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
- ret = snd_pcm_hw_constraint_minmax(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS,
- 3,
- mtk_afe_hardware.periods_max);
- if (ret < 0) {
- dev_err(afe->dev, "hw_constraint_minmax failed\n");
- return ret;
- }
- }
- ret = snd_pcm_hw_constraint_integer(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS);
- if (ret < 0)
- dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");
- return ret;
-}
-
-static void mtk_afe_dais_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
-
- memif->substream = NULL;
-}
-
-static int mtk_afe_dais_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
- int msb_at_bit33 = 0;
- int ret;
-
- dev_dbg(afe->dev,
- "%s period = %u, rate= %u, channels=%u\n",
- __func__, params_period_size(params), params_rate(params),
- params_channels(params));
-
- ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
- if (ret < 0)
- return ret;
-
- msb_at_bit33 = upper_32_bits(substream->runtime->dma_addr) ? 1 : 0;
- memif->phys_buf_addr = lower_32_bits(substream->runtime->dma_addr);
- memif->buffer_size = substream->runtime->dma_bytes;
-
- /* start */
- regmap_write(afe->regmap,
- memif->data->reg_ofs_base, memif->phys_buf_addr);
- /* end */
- regmap_write(afe->regmap,
- memif->data->reg_ofs_base + AFE_BASE_END_OFFSET,
- memif->phys_buf_addr + memif->buffer_size - 1);
-
- /* set MSB to 33-bit */
- regmap_update_bits(afe->regmap, AFE_MEMIF_MSB,
- 1 << memif->data->msb_shift,
- msb_at_bit33 << memif->data->msb_shift);
-
- /* set channel */
- if (memif->data->mono_shift >= 0) {
- unsigned int mono = (params_channels(params) == 1) ? 1 : 0;
-
- regmap_update_bits(afe->regmap, AFE_DAC_CON1,
- 1 << memif->data->mono_shift,
- mono << memif->data->mono_shift);
- }
-
- /* set rate */
- if (memif->data->fs_shift < 0)
- return 0;
- if (memif->data->id == MTK_AFE_MEMIF_DAI ||
- memif->data->id == MTK_AFE_MEMIF_MOD_DAI) {
- unsigned int val;
-
- switch (params_rate(params)) {
- case 8000:
- val = 0;
- break;
- case 16000:
- val = 1;
- break;
- case 32000:
- val = 2;
- break;
- default:
- return -EINVAL;
- }
-
- if (memif->data->id == MTK_AFE_MEMIF_DAI)
- regmap_update_bits(afe->regmap, AFE_DAC_CON0,
- 0x3 << memif->data->fs_shift,
- val << memif->data->fs_shift);
- else
- regmap_update_bits(afe->regmap, AFE_DAC_CON1,
- 0x3 << memif->data->fs_shift,
- val << memif->data->fs_shift);
-
- } else {
- int fs = mtk_afe_i2s_fs(params_rate(params));
-
- if (fs < 0)
- return -EINVAL;
-
- regmap_update_bits(afe->regmap, AFE_DAC_CON1,
- 0xf << memif->data->fs_shift,
- fs << memif->data->fs_shift);
- }
-
- return 0;
-}
-
-static int mtk_afe_dais_hw_free(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- return snd_pcm_lib_free_pages(substream);
-}
-
-static int mtk_afe_dais_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_pcm_runtime * const runtime = substream->runtime;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
- unsigned int counter = runtime->period_size;
-
- dev_info(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- if (memif->data->enable_shift >= 0)
- regmap_update_bits(afe->regmap, AFE_DAC_CON0,
- 1 << memif->data->enable_shift,
- 1 << memif->data->enable_shift);
-
- /* set irq counter */
- regmap_update_bits(afe->regmap,
- memif->data->irq_reg_cnt,
- 0x3ffff << memif->data->irq_cnt_shift,
- counter << memif->data->irq_cnt_shift);
-
- /* set irq fs */
- if (memif->data->irq_fs_shift >= 0) {
- int fs = mtk_afe_i2s_fs(runtime->rate);
-
- if (fs < 0)
- return -EINVAL;
-
- regmap_update_bits(afe->regmap,
- AFE_IRQ_MCU_CON,
- 0xf << memif->data->irq_fs_shift,
- fs << memif->data->irq_fs_shift);
- }
- /* enable interrupt */
- regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON,
- 1 << memif->data->irq_en_shift,
- 1 << memif->data->irq_en_shift);
-
- return 0;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- if (memif->data->enable_shift >= 0)
- regmap_update_bits(afe->regmap, AFE_DAC_CON0,
- 1 << memif->data->enable_shift, 0);
- /* disable interrupt */
- regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON,
- 1 << memif->data->irq_en_shift,
- 0 << memif->data->irq_en_shift);
- /* and clear pending IRQ */
- regmap_write(afe->regmap, AFE_IRQ_CLR,
- 1 << memif->data->irq_clr_shift);
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-/* FE DAIs */
-static const struct snd_soc_dai_ops mtk_afe_dai_ops = {
- .startup = mtk_afe_dais_startup,
- .shutdown = mtk_afe_dais_shutdown,
- .hw_params = mtk_afe_dais_hw_params,
- .hw_free = mtk_afe_dais_hw_free,
- .trigger = mtk_afe_dais_trigger,
-};
-
-/* BE DAIs */
-static const struct snd_soc_dai_ops mtk_afe_i2s_ops = {
- .startup = mtk_afe_i2s_startup,
- .shutdown = mtk_afe_i2s_shutdown,
- .prepare = mtk_afe_i2s_prepare,
-};
-
-static const struct snd_soc_dai_ops mtk_afe_hdmi_ops = {
- .startup = mtk_afe_hdmi_startup,
- .shutdown = mtk_afe_hdmi_shutdown,
- .prepare = mtk_afe_hdmi_prepare,
- .trigger = mtk_afe_hdmi_trigger,
-
-};
-
-static int mtk_afe_runtime_suspend(struct device *dev);
-static int mtk_afe_runtime_resume(struct device *dev);
-
-static int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
-{
- struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
- int i;
-
- dev_dbg(afe->dev, "%s\n", __func__);
- if (pm_runtime_status_suspended(afe->dev) || afe->suspended)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(mtk_afe_backup_list); i++)
- regmap_read(afe->regmap, mtk_afe_backup_list[i],
- &afe->backup_regs[i]);
-
- afe->suspended = true;
- mtk_afe_runtime_suspend(afe->dev);
- return 0;
-}
-
-static int mtk_afe_dai_resume(struct snd_soc_dai *dai)
-{
- struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
- int i = 0;
-
- dev_dbg(afe->dev, "%s\n", __func__);
- if (pm_runtime_status_suspended(afe->dev) || !afe->suspended)
- return 0;
-
- mtk_afe_runtime_resume(afe->dev);
-
- for (i = 0; i < ARRAY_SIZE(mtk_afe_backup_list); i++)
- regmap_write(afe->regmap, mtk_afe_backup_list[i],
- afe->backup_regs[i]);
-
- afe->suspended = false;
- return 0;
-}
-
-static struct snd_soc_dai_driver mtk_afe_pcm_dais[] = {
- /* FE DAIs: memory interfaces to CPU */
- {
- .name = "DL1", /* downlink 1 */
- .id = MTK_AFE_MEMIF_DL1,
- .suspend = mtk_afe_dai_suspend,
- .resume = mtk_afe_dai_resume,
- .playback = {
- .stream_name = "DL1",
- .channels_min = 1,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- .ops = &mtk_afe_dai_ops,
- }, {
- .name = "VUL", /* voice uplink */
- .id = MTK_AFE_MEMIF_VUL,
- .suspend = mtk_afe_dai_suspend,
- .resume = mtk_afe_dai_resume,
- .capture = {
- .stream_name = "VUL",
- .channels_min = 1,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- .ops = &mtk_afe_dai_ops,
- }, {
- /* BE DAIs */
- .name = "I2S",
- .id = MTK_AFE_IO_I2S,
- .playback = {
- .stream_name = "I2S Playback",
- .channels_min = 1,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- .capture = {
- .stream_name = "I2S Capture",
- .channels_min = 1,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- .ops = &mtk_afe_i2s_ops,
- .symmetric_rates = 1,
- },
-};
-
-static struct snd_soc_dai_driver mtk_afe_hdmi_dais[] = {
- /* FE DAIs */
- {
- .name = "HDMI",
- .id = MTK_AFE_MEMIF_HDMI,
- .suspend = mtk_afe_dai_suspend,
- .resume = mtk_afe_dai_resume,
- .playback = {
- .stream_name = "HDMI",
- .channels_min = 2,
- .channels_max = 8,
- .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
- SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
- SNDRV_PCM_RATE_192000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- .ops = &mtk_afe_dai_ops,
- }, {
- /* BE DAIs */
- .name = "HDMIO",
- .id = MTK_AFE_IO_HDMI,
- .playback = {
- .stream_name = "HDMIO Playback",
- .channels_min = 2,
- .channels_max = 8,
- .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
- SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
- SNDRV_PCM_RATE_192000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- .ops = &mtk_afe_hdmi_ops,
- },
-};
-
-static const struct snd_kcontrol_new mtk_afe_o03_mix[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("I05 Switch", AFE_CONN1, 21, 1, 0),
-};
-
-static const struct snd_kcontrol_new mtk_afe_o04_mix[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("I06 Switch", AFE_CONN2, 6, 1, 0),
-};
-
-static const struct snd_kcontrol_new mtk_afe_o09_mix[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("I03 Switch", AFE_CONN3, 0, 1, 0),
- SOC_DAPM_SINGLE_AUTODISABLE("I17 Switch", AFE_CONN7, 30, 1, 0),
-};
-
-static const struct snd_kcontrol_new mtk_afe_o10_mix[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("I04 Switch", AFE_CONN3, 3, 1, 0),
- SOC_DAPM_SINGLE_AUTODISABLE("I18 Switch", AFE_CONN8, 0, 1, 0),
-};
-
-static const struct snd_soc_dapm_widget mtk_afe_pcm_widgets[] = {
- /* inter-connections */
- SND_SOC_DAPM_MIXER("I03", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("I04", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("I05", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("I06", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("I17", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("I18", SND_SOC_NOPM, 0, 0, NULL, 0),
-
- SND_SOC_DAPM_MIXER("O03", SND_SOC_NOPM, 0, 0,
- mtk_afe_o03_mix, ARRAY_SIZE(mtk_afe_o03_mix)),
- SND_SOC_DAPM_MIXER("O04", SND_SOC_NOPM, 0, 0,
- mtk_afe_o04_mix, ARRAY_SIZE(mtk_afe_o04_mix)),
- SND_SOC_DAPM_MIXER("O09", SND_SOC_NOPM, 0, 0,
- mtk_afe_o09_mix, ARRAY_SIZE(mtk_afe_o09_mix)),
- SND_SOC_DAPM_MIXER("O10", SND_SOC_NOPM, 0, 0,
- mtk_afe_o10_mix, ARRAY_SIZE(mtk_afe_o10_mix)),
-};
-
-static const struct snd_soc_dapm_route mtk_afe_pcm_routes[] = {
- {"I05", NULL, "DL1"},
- {"I06", NULL, "DL1"},
- {"I2S Playback", NULL, "O03"},
- {"I2S Playback", NULL, "O04"},
- {"VUL", NULL, "O09"},
- {"VUL", NULL, "O10"},
- {"I03", NULL, "I2S Capture"},
- {"I04", NULL, "I2S Capture"},
- {"I17", NULL, "I2S Capture"},
- {"I18", NULL, "I2S Capture"},
- { "O03", "I05 Switch", "I05" },
- { "O04", "I06 Switch", "I06" },
- { "O09", "I17 Switch", "I17" },
- { "O09", "I03 Switch", "I03" },
- { "O10", "I18 Switch", "I18" },
- { "O10", "I04 Switch", "I04" },
-};
-
-static const struct snd_soc_dapm_route mtk_afe_hdmi_routes[] = {
- {"HDMIO Playback", NULL, "HDMI"},
-};
-
-static const struct snd_soc_component_driver mtk_afe_pcm_dai_component = {
- .name = "mtk-afe-pcm-dai",
- .dapm_widgets = mtk_afe_pcm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(mtk_afe_pcm_widgets),
- .dapm_routes = mtk_afe_pcm_routes,
- .num_dapm_routes = ARRAY_SIZE(mtk_afe_pcm_routes),
-};
-
-static const struct snd_soc_component_driver mtk_afe_hdmi_dai_component = {
- .name = "mtk-afe-hdmi-dai",
- .dapm_routes = mtk_afe_hdmi_routes,
- .num_dapm_routes = ARRAY_SIZE(mtk_afe_hdmi_routes),
-};
-
-static const char *aud_clks[MTK_CLK_NUM] = {
- [MTK_CLK_INFRASYS_AUD] = "infra_sys_audio_clk",
- [MTK_CLK_TOP_PDN_AUD] = "top_pdn_audio",
- [MTK_CLK_TOP_PDN_AUD_BUS] = "top_pdn_aud_intbus",
- [MTK_CLK_I2S0_M] = "i2s0_m",
- [MTK_CLK_I2S1_M] = "i2s1_m",
- [MTK_CLK_I2S2_M] = "i2s2_m",
- [MTK_CLK_I2S3_M] = "i2s3_m",
- [MTK_CLK_I2S3_B] = "i2s3_b",
- [MTK_CLK_BCK0] = "bck0",
- [MTK_CLK_BCK1] = "bck1",
-};
-
-static const struct mtk_afe_memif_data memif_data[MTK_AFE_MEMIF_NUM] = {
- {
- .name = "DL1",
- .id = MTK_AFE_MEMIF_DL1,
- .reg_ofs_base = AFE_DL1_BASE,
- .reg_ofs_cur = AFE_DL1_CUR,
- .fs_shift = 0,
- .mono_shift = 21,
- .enable_shift = 1,
- .irq_reg_cnt = AFE_IRQ_CNT1,
- .irq_cnt_shift = 0,
- .irq_en_shift = 0,
- .irq_fs_shift = 4,
- .irq_clr_shift = 0,
- .msb_shift = 0,
- }, {
- .name = "DL2",
- .id = MTK_AFE_MEMIF_DL2,
- .reg_ofs_base = AFE_DL2_BASE,
- .reg_ofs_cur = AFE_DL2_CUR,
- .fs_shift = 4,
- .mono_shift = 22,
- .enable_shift = 2,
- .irq_reg_cnt = AFE_IRQ_CNT1,
- .irq_cnt_shift = 20,
- .irq_en_shift = 2,
- .irq_fs_shift = 16,
- .irq_clr_shift = 2,
- .msb_shift = 1,
- }, {
- .name = "VUL",
- .id = MTK_AFE_MEMIF_VUL,
- .reg_ofs_base = AFE_VUL_BASE,
- .reg_ofs_cur = AFE_VUL_CUR,
- .fs_shift = 16,
- .mono_shift = 27,
- .enable_shift = 3,
- .irq_reg_cnt = AFE_IRQ_CNT2,
- .irq_cnt_shift = 0,
- .irq_en_shift = 1,
- .irq_fs_shift = 8,
- .irq_clr_shift = 1,
- .msb_shift = 6,
- }, {
- .name = "DAI",
- .id = MTK_AFE_MEMIF_DAI,
- .reg_ofs_base = AFE_DAI_BASE,
- .reg_ofs_cur = AFE_DAI_CUR,
- .fs_shift = 24,
- .mono_shift = -1,
- .enable_shift = 4,
- .irq_reg_cnt = AFE_IRQ_CNT2,
- .irq_cnt_shift = 20,
- .irq_en_shift = 3,
- .irq_fs_shift = 20,
- .irq_clr_shift = 3,
- .msb_shift = 5,
- }, {
- .name = "AWB",
- .id = MTK_AFE_MEMIF_AWB,
- .reg_ofs_base = AFE_AWB_BASE,
- .reg_ofs_cur = AFE_AWB_CUR,
- .fs_shift = 12,
- .mono_shift = 24,
- .enable_shift = 6,
- .irq_reg_cnt = AFE_IRQ_CNT7,
- .irq_cnt_shift = 0,
- .irq_en_shift = 14,
- .irq_fs_shift = 24,
- .irq_clr_shift = 6,
- .msb_shift = 3,
- }, {
- .name = "MOD_DAI",
- .id = MTK_AFE_MEMIF_MOD_DAI,
- .reg_ofs_base = AFE_MOD_PCM_BASE,
- .reg_ofs_cur = AFE_MOD_PCM_CUR,
- .fs_shift = 30,
- .mono_shift = 30,
- .enable_shift = 7,
- .irq_reg_cnt = AFE_IRQ_CNT2,
- .irq_cnt_shift = 20,
- .irq_en_shift = 3,
- .irq_fs_shift = 20,
- .irq_clr_shift = 3,
- .msb_shift = 4,
- }, {
- .name = "HDMI",
- .id = MTK_AFE_MEMIF_HDMI,
- .reg_ofs_base = AFE_HDMI_OUT_BASE,
- .reg_ofs_cur = AFE_HDMI_OUT_CUR,
- .fs_shift = -1,
- .mono_shift = -1,
- .enable_shift = -1,
- .irq_reg_cnt = AFE_IRQ_CNT5,
- .irq_cnt_shift = 0,
- .irq_en_shift = 12,
- .irq_fs_shift = -1,
- .irq_clr_shift = 4,
- .msb_shift = 8,
- },
-};
-
-static const struct regmap_config mtk_afe_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .max_register = AFE_ADDA2_TOP_CON0,
- .cache_type = REGCACHE_NONE,
-};
-
-static irqreturn_t mtk_afe_irq_handler(int irq, void *dev_id)
-{
- struct mtk_afe *afe = dev_id;
- unsigned int reg_value;
- int i, ret;
-
- ret = regmap_read(afe->regmap, AFE_IRQ_STATUS, &reg_value);
- if (ret) {
- dev_err(afe->dev, "%s irq status err\n", __func__);
- reg_value = AFE_IRQ_STATUS_BITS;
- goto err_irq;
- }
-
- for (i = 0; i < MTK_AFE_MEMIF_NUM; i++) {
- struct mtk_afe_memif *memif = &afe->memif[i];
-
- if (!(reg_value & (1 << memif->data->irq_clr_shift)))
- continue;
-
- snd_pcm_period_elapsed(memif->substream);
- }
-
-err_irq:
- /* clear irq */
- regmap_write(afe->regmap, AFE_IRQ_CLR, reg_value & AFE_IRQ_STATUS_BITS);
-
- return IRQ_HANDLED;
-}
-
-static int mtk_afe_runtime_suspend(struct device *dev)
-{
- struct mtk_afe *afe = dev_get_drvdata(dev);
-
- /* disable AFE */
- regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0);
-
- /* disable AFE clk */
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
- AUD_TCON0_PDN_AFE, AUD_TCON0_PDN_AFE);
-
- clk_disable_unprepare(afe->clocks[MTK_CLK_BCK0]);
- clk_disable_unprepare(afe->clocks[MTK_CLK_BCK1]);
- clk_disable_unprepare(afe->clocks[MTK_CLK_TOP_PDN_AUD]);
- clk_disable_unprepare(afe->clocks[MTK_CLK_TOP_PDN_AUD_BUS]);
- clk_disable_unprepare(afe->clocks[MTK_CLK_INFRASYS_AUD]);
- return 0;
-}
-
-static int mtk_afe_runtime_resume(struct device *dev)
-{
- struct mtk_afe *afe = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(afe->clocks[MTK_CLK_INFRASYS_AUD]);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(afe->clocks[MTK_CLK_TOP_PDN_AUD_BUS]);
- if (ret)
- goto err_infra;
-
- ret = clk_prepare_enable(afe->clocks[MTK_CLK_TOP_PDN_AUD]);
- if (ret)
- goto err_top_aud_bus;
-
- ret = clk_prepare_enable(afe->clocks[MTK_CLK_BCK0]);
- if (ret)
- goto err_top_aud;
-
- ret = clk_prepare_enable(afe->clocks[MTK_CLK_BCK1]);
- if (ret)
- goto err_bck0;
-
- /* enable AFE clk */
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, AUD_TCON0_PDN_AFE, 0);
-
- /* set O3/O4 16bits */
- regmap_update_bits(afe->regmap, AFE_CONN_24BIT,
- AFE_CONN_24BIT_O03 | AFE_CONN_24BIT_O04, 0);
-
- /* unmask all IRQs */
- regmap_update_bits(afe->regmap, AFE_IRQ_MCU_EN, 0xff, 0xff);
-
- /* enable AFE */
- regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0x1);
- return 0;
-
-err_bck0:
- clk_disable_unprepare(afe->clocks[MTK_CLK_BCK0]);
-err_top_aud:
- clk_disable_unprepare(afe->clocks[MTK_CLK_TOP_PDN_AUD]);
-err_top_aud_bus:
- clk_disable_unprepare(afe->clocks[MTK_CLK_TOP_PDN_AUD_BUS]);
-err_infra:
- clk_disable_unprepare(afe->clocks[MTK_CLK_INFRASYS_AUD]);
- return ret;
-}
-
-static int mtk_afe_init_audio_clk(struct mtk_afe *afe)
-{
- size_t i;
-
- for (i = 0; i < ARRAY_SIZE(aud_clks); i++) {
- afe->clocks[i] = devm_clk_get(afe->dev, aud_clks[i]);
- if (IS_ERR(afe->clocks[i])) {
- dev_err(afe->dev, "%s devm_clk_get %s fail\n",
- __func__, aud_clks[i]);
- return PTR_ERR(afe->clocks[i]);
- }
- }
- clk_set_rate(afe->clocks[MTK_CLK_BCK0], 22579200); /* 22M */
- clk_set_rate(afe->clocks[MTK_CLK_BCK1], 24576000); /* 24M */
- return 0;
-}
-
-static int mtk_afe_pcm_dev_probe(struct platform_device *pdev)
-{
- int ret, i;
- unsigned int irq_id;
- struct mtk_afe *afe;
- struct resource *res;
-
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33));
- if (ret)
- return ret;
-
- afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
- if (!afe)
- return -ENOMEM;
-
- afe->dev = &pdev->dev;
-
- irq_id = platform_get_irq(pdev, 0);
- if (!irq_id) {
- dev_err(afe->dev, "np %s no irq\n", afe->dev->of_node->name);
- return -ENXIO;
- }
- ret = devm_request_irq(afe->dev, irq_id, mtk_afe_irq_handler,
- 0, "Afe_ISR_Handle", (void *)afe);
- if (ret) {
- dev_err(afe->dev, "could not request_irq\n");
- return ret;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- afe->base_addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(afe->base_addr))
- return PTR_ERR(afe->base_addr);
-
- afe->regmap = devm_regmap_init_mmio(&pdev->dev, afe->base_addr,
- &mtk_afe_regmap_config);
- if (IS_ERR(afe->regmap))
- return PTR_ERR(afe->regmap);
-
- /* initial audio related clock */
- ret = mtk_afe_init_audio_clk(afe);
- if (ret) {
- dev_err(afe->dev, "mtk_afe_init_audio_clk fail\n");
- return ret;
- }
-
- for (i = 0; i < MTK_AFE_MEMIF_NUM; i++)
- afe->memif[i].data = &memif_data[i];
-
- platform_set_drvdata(pdev, afe);
-
- pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev)) {
- ret = mtk_afe_runtime_resume(&pdev->dev);
- if (ret)
- goto err_pm_disable;
- }
-
- ret = snd_soc_register_platform(&pdev->dev, &mtk_afe_pcm_platform);
- if (ret)
- goto err_pm_disable;
-
- ret = snd_soc_register_component(&pdev->dev,
- &mtk_afe_pcm_dai_component,
- mtk_afe_pcm_dais,
- ARRAY_SIZE(mtk_afe_pcm_dais));
- if (ret)
- goto err_platform;
-
- ret = snd_soc_register_component(&pdev->dev,
- &mtk_afe_hdmi_dai_component,
- mtk_afe_hdmi_dais,
- ARRAY_SIZE(mtk_afe_hdmi_dais));
- if (ret)
- goto err_comp;
-
- dev_info(&pdev->dev, "MTK AFE driver initialized.\n");
- return 0;
-
-err_comp:
- snd_soc_unregister_component(&pdev->dev);
-err_platform:
- snd_soc_unregister_platform(&pdev->dev);
-err_pm_disable:
- pm_runtime_disable(&pdev->dev);
- return ret;
-}
-
-static int mtk_afe_pcm_dev_remove(struct platform_device *pdev)
-{
- pm_runtime_disable(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
- mtk_afe_runtime_suspend(&pdev->dev);
- snd_soc_unregister_component(&pdev->dev);
- snd_soc_unregister_platform(&pdev->dev);
- return 0;
-}
-
-static const struct of_device_id mtk_afe_pcm_dt_match[] = {
- { .compatible = "mediatek,mt8173-afe-pcm", },
- { }
-};
-MODULE_DEVICE_TABLE(of, mtk_afe_pcm_dt_match);
-
-static const struct dev_pm_ops mtk_afe_pm_ops = {
- SET_RUNTIME_PM_OPS(mtk_afe_runtime_suspend, mtk_afe_runtime_resume,
- NULL)
-};
-
-static struct platform_driver mtk_afe_pcm_driver = {
- .driver = {
- .name = "mtk-afe-pcm",
- .of_match_table = mtk_afe_pcm_dt_match,
- .pm = &mtk_afe_pm_ops,
- },
- .probe = mtk_afe_pcm_dev_probe,
- .remove = mtk_afe_pcm_dev_remove,
-};
-
-module_platform_driver(mtk_afe_pcm_driver);
-
-MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver");
-MODULE_AUTHOR("Koro Chen <koro.chen@mediatek.com>");
-MODULE_LICENSE("GPL v2");
select SND_SOC_TWL6040
select SND_SOC_DMIC
select COMMON_CLK_PALMAS if (SOC_OMAP5 && MFD_PALMAS)
+ select CLK_TWL6040
help
Say Y if you want to add support for SoC audio on OMAP boards using
ABE and twl6040 codec. This driver currently supports:
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
unsigned long phys_base;
void __iomem *io_base;
int irq;
+ struct clk *pdmclk;
struct mutex mutex;
/* McPDM needs to be restarted due to runtime reconfiguration */
bool restart;
+ /* pm state for suspend/resume handling */
+ int pm_active_count;
+
struct snd_dmaengine_dai_dma_data dma_data[2];
};
*/
static void omap_mcpdm_open_streams(struct omap_mcpdm *mcpdm)
{
+ u32 ctrl = omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL);
+
+ omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl | MCPDM_WD_EN);
+
omap_mcpdm_write(mcpdm, MCPDM_REG_IRQENABLE_SET,
MCPDM_DN_IRQ_EMPTY | MCPDM_DN_IRQ_FULL |
MCPDM_UP_IRQ_EMPTY | MCPDM_UP_IRQ_FULL);
mutex_lock(&mcpdm->mutex);
- if (!dai->active) {
- u32 ctrl = omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL);
-
- omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl | MCPDM_WD_EN);
+ if (!dai->active)
omap_mcpdm_open_streams(mcpdm);
- }
+
mutex_unlock(&mcpdm->mutex);
return 0;
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
int ret;
+ clk_prepare_enable(mcpdm->pdmclk);
pm_runtime_enable(mcpdm->dev);
/* Disable lines while request is ongoing */
pm_runtime_disable(mcpdm->dev);
+ clk_disable_unprepare(mcpdm->pdmclk);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int omap_mcpdm_suspend(struct snd_soc_dai *dai)
+{
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+
+ if (dai->active) {
+ omap_mcpdm_stop(mcpdm);
+ omap_mcpdm_close_streams(mcpdm);
+ }
+
+ mcpdm->pm_active_count = 0;
+ while (pm_runtime_active(mcpdm->dev)) {
+ pm_runtime_put_sync(mcpdm->dev);
+ mcpdm->pm_active_count++;
+ }
+
+ clk_disable_unprepare(mcpdm->pdmclk);
+
+ return 0;
+}
+
+static int omap_mcpdm_resume(struct snd_soc_dai *dai)
+{
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+
+ clk_prepare_enable(mcpdm->pdmclk);
+
+ if (mcpdm->pm_active_count) {
+ while (mcpdm->pm_active_count--)
+ pm_runtime_get_sync(mcpdm->dev);
+
+ if (dai->active) {
+ omap_mcpdm_open_streams(mcpdm);
+ omap_mcpdm_start(mcpdm);
+ }
+ }
+
return 0;
}
+#else
+#define omap_mcpdm_suspend NULL
+#define omap_mcpdm_resume NULL
+#endif
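A note on the bookkeeping above (an invariant sketch, not extra code to apply): suspend drains every outstanding runtime-PM reference into pm_active_count, and resume replays exactly that many gets, so the device's usage count ends where it started.

	/* invariant (sketch):
	 *   usage_count(after resume) == usage_count(before suspend);
	 *   pm_active_count is only scratch state between the two callbacks */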
#define OMAP_MCPDM_RATES (SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
#define OMAP_MCPDM_FORMATS SNDRV_PCM_FMTBIT_S32_LE
static struct snd_soc_dai_driver omap_mcpdm_dai = {
.probe = omap_mcpdm_probe,
.remove = omap_mcpdm_remove,
+ .suspend = omap_mcpdm_suspend,
+ .resume = omap_mcpdm_resume,
.probe_order = SND_SOC_COMP_ORDER_LATE,
.remove_order = SND_SOC_COMP_ORDER_EARLY,
.playback = {
mcpdm->dev = &pdev->dev;
+ mcpdm->pdmclk = devm_clk_get(&pdev->dev, "pdmclk");
+ if (IS_ERR(mcpdm->pdmclk)) {
+ if (PTR_ERR(mcpdm->pdmclk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_warn(&pdev->dev, "Error getting pdmclk (%ld)!\n",
+ PTR_ERR(mcpdm->pdmclk));
+ mcpdm->pdmclk = NULL;
+ }
+
ret = devm_snd_soc_register_component(&pdev->dev,
&omap_mcpdm_component,
&omap_mcpdm_dai, 1);
*/
#include <linux/module.h>
+#include <linux/mfd/syscon.h>
#include <linux/delay.h>
#include <linux/of_gpio.h>
+#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#define DRV_NAME "rockchip-i2s"
+struct rk_i2s_pins {
+ u32 reg_offset;
+ u32 shift;
+};
+
struct rk_i2s_dev {
struct device *dev;
struct snd_dmaengine_dai_dma_data playback_dma_data;
struct regmap *regmap;
+ struct regmap *grf;
/*
* Used to indicate the tx/rx status.
bool tx_start;
bool rx_start;
bool is_master_mode;
+ const struct rk_i2s_pins *pins;
};
static int i2s_runtime_suspend(struct device *dev)
I2S_TXCR_VDW_MASK | I2S_TXCR_CSR_MASK,
val);
+ if (!IS_ERR(i2s->grf) && i2s->pins) {
+ regmap_read(i2s->regmap, I2S_TXCR, &val);
+ val &= I2S_TXCR_CSR_MASK;
+
+ switch (val) {
+ case I2S_CHN_4:
+ val = I2S_IO_4CH_OUT_6CH_IN;
+ break;
+ case I2S_CHN_6:
+ val = I2S_IO_6CH_OUT_4CH_IN;
+ break;
+ case I2S_CHN_8:
+ val = I2S_IO_8CH_OUT_2CH_IN;
+ break;
+ default:
+ val = I2S_IO_2CH_OUT_8CH_IN;
+ break;
+ }
+
+ val <<= i2s->pins->shift;
+ val |= (I2S_IO_DIRECTION_MASK << i2s->pins->shift) << 16;
+ regmap_write(i2s->grf, i2s->pins->reg_offset, val);
+ }
+
regmap_update_bits(i2s->regmap, I2S_DMACR, I2S_DMACR_TDL_MASK,
I2S_DMACR_TDL(16));
regmap_update_bits(i2s->regmap, I2S_DMACR, I2S_DMACR_RDL_MASK,
I2S_DMACR_RDL(16));
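The GRF write above follows the Rockchip "hiword mask" convention: the upper 16 bits of the written value act as write-enable bits for the corresponding lower 16 bits, so a single MMIO write updates the io-direction field without a read-modify-write. A minimal sketch with a hypothetical helper (not part of this patch):

	static inline u32 rk_grf_hiword(u32 mask, u32 shift, u32 val)
	{
		/* low half: new field value; high half: write-enable bits */
		return (val << shift) | ((mask << shift) << 16);
	}

	/* equivalent to the open-coded sequence above:
	 * regmap_write(i2s->grf, i2s->pins->reg_offset,
	 *	rk_grf_hiword(I2S_IO_DIRECTION_MASK, i2s->pins->shift, val));
	 */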
val = I2S_CKR_TRCM_TXRX;
- if (dai->driver->symmetric_rates || rtd->dai_link->symmetric_rates)
- val = I2S_CKR_TRCM_TXSHARE;
+ if (dai->driver->symmetric_rates && rtd->dai_link->symmetric_rates)
+ val = I2S_CKR_TRCM_TXONLY;
regmap_update_bits(i2s->regmap, I2S_CKR,
I2S_CKR_TRCM_MASK,
.cache_type = REGCACHE_FLAT,
};
+static const struct rk_i2s_pins rk3399_i2s_pins = {
+ .reg_offset = 0xe220,
+ .shift = 11,
+};
+
+static const struct of_device_id rockchip_i2s_match[] = {
+ { .compatible = "rockchip,rk3066-i2s", },
+ { .compatible = "rockchip,rk3188-i2s", },
+ { .compatible = "rockchip,rk3288-i2s", },
+ { .compatible = "rockchip,rk3399-i2s", .data = &rk3399_i2s_pins },
+ {},
+};
+
static int rockchip_i2s_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *of_id;
struct rk_i2s_dev *i2s;
struct snd_soc_dai_driver *soc_dai;
struct resource *res;
return -ENOMEM;
}
+ i2s->dev = &pdev->dev;
+
+ i2s->grf = syscon_regmap_lookup_by_phandle(node, "rockchip,grf");
+ if (!IS_ERR(i2s->grf)) {
+ of_id = of_match_device(rockchip_i2s_match, &pdev->dev);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+
+ i2s->pins = of_id->data;
+ }
+
/* try to prepare related clocks */
i2s->hclk = devm_clk_get(&pdev->dev, "i2s_hclk");
if (IS_ERR(i2s->hclk)) {
i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
i2s->capture_dma_data.maxburst = 4;
- i2s->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, i2s);
pm_runtime_enable(&pdev->dev);
return 0;
}
-static const struct of_device_id rockchip_i2s_match[] = {
- { .compatible = "rockchip,rk3066-i2s", },
- { .compatible = "rockchip,rk3188-i2s", },
- { .compatible = "rockchip,rk3288-i2s", },
- { .compatible = "rockchip,rk3399-i2s", },
- {},
-};
-
static const struct dev_pm_ops rockchip_i2s_pm_ops = {
SET_RUNTIME_PM_OPS(i2s_runtime_suspend, i2s_runtime_resume,
NULL)
#define I2S_CKR_TRCM_SHIFT 28
#define I2S_CKR_TRCM(x) (x << I2S_CKR_TRCM_SHIFT)
#define I2S_CKR_TRCM_TXRX (0 << I2S_CKR_TRCM_SHIFT)
-#define I2S_CKR_TRCM_TXSHARE (1 << I2S_CKR_TRCM_SHIFT)
-#define I2S_CKR_TRCM_RXSHARE (2 << I2S_CKR_TRCM_SHIFT)
+#define I2S_CKR_TRCM_TXONLY (1 << I2S_CKR_TRCM_SHIFT)
+#define I2S_CKR_TRCM_RXONLY (2 << I2S_CKR_TRCM_SHIFT)
#define I2S_CKR_TRCM_MASK (3 << I2S_CKR_TRCM_SHIFT)
#define I2S_CKR_MSS_SHIFT 27
#define I2S_CKR_MSS_MASTER (0 << I2S_CKR_MSS_SHIFT)
#define I2S_TXDR (0x0024)
#define I2S_RXDR (0x0028)
+/* io direction cfg register */
+#define I2S_IO_DIRECTION_MASK (7)
+#define I2S_IO_8CH_OUT_2CH_IN (0)
+#define I2S_IO_6CH_OUT_4CH_IN (4)
+#define I2S_IO_4CH_OUT_6CH_IN (6)
+#define I2S_IO_2CH_OUT_8CH_IN (7)
+
#endif /* _ROCKCHIP_IIS_H */
#define DRV_NAME "rockchip-snd-max98090"
static struct snd_soc_jack headset_jack;
+
+/* Headset jack detection DAPM pins */
static struct snd_soc_jack_pin headset_jack_pins[] = {
{
- .pin = "Headset Jack",
- .mask = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ .pin = "Headphone",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
},
};
static const struct snd_soc_dapm_widget rk_dapm_widgets[] = {
static const struct snd_soc_dapm_route rk_audio_map[] = {
{"IN34", NULL, "Headset Mic"},
{"IN34", NULL, "MICBIAS"},
- {"MICBIAS", NULL, "Headset Mic"},
+ {"Headset Mic", NULL, "MICBIAS"},
{"DMICL", NULL, "Int Mic"},
{"Headphone", NULL, "HPL"},
{"Headphone", NULL, "HPR"},
return ret;
}
-static int rk_init(struct snd_soc_pcm_runtime *runtime)
-{
- /* Enable Headset and 4 Buttons Jack detection */
- return snd_soc_card_jack_new(runtime->card, "Headset Jack",
- SND_JACK_HEADSET |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3,
- &headset_jack,
- headset_jack_pins,
- ARRAY_SIZE(headset_jack_pins));
-}
-
-static int rk_98090_headset_init(struct snd_soc_component *component)
-{
- return ts3a227e_enable_jack_detect(component, &headset_jack);
-}
-
static struct snd_soc_ops rk_aif1_ops = {
.hw_params = rk_aif1_hw_params,
};
-static struct snd_soc_aux_dev rk_98090_headset_dev = {
- .name = "Headset Chip",
- .init = rk_98090_headset_init,
-};
-
static struct snd_soc_dai_link rk_dailink = {
.name = "max98090",
.stream_name = "Audio",
.codec_dai_name = "HiFi",
- .init = rk_init,
.ops = &rk_aif1_ops,
/* set max98090 as slave */
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
};
+static int rk_98090_headset_init(struct snd_soc_component *component);
+
+static struct snd_soc_aux_dev rk_98090_headset_dev = {
+ .name = "Headset Chip",
+ .init = rk_98090_headset_init,
+};
+
static struct snd_soc_card snd_soc_card_rk = {
.name = "ROCKCHIP-I2S",
.owner = THIS_MODULE,
.num_controls = ARRAY_SIZE(rk_mc_controls),
};
+static int rk_98090_headset_init(struct snd_soc_component *component)
+{
+ int ret;
+
+ /* Enable Headset and 4 Buttons Jack detection */
+ ret = snd_soc_card_jack_new(&snd_soc_card_rk, "Headset Jack",
+ SND_JACK_HEADSET |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ &headset_jack,
+ headset_jack_pins,
+ ARRAY_SIZE(headset_jack_pins));
+ if (ret)
+ return ret;
+
+ ret = ts3a227e_enable_jack_detect(component, &headset_jack);
+
+ return ret;
+}
+
static int snd_rk_mc_probe(struct platform_device *pdev)
{
int ret = 0;
int ret;
srate = params_rate(params);
- switch (srate) {
- case 32000:
- case 48000:
- case 96000:
- mclk = 96000 * 128; /* 12288000 hz */
- break;
- case 44100:
- mclk = 44100 * 256; /* 11289600 hz */
- break;
- case 192000:
- mclk = 192000 * 128; /* 24576000 hz */
- break;
- default:
- return -EINVAL;
- }
+ mclk = srate * 128;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
return ret;
}
- val |= SPDIF_CFGR_CLK_DIV(mclk/(srate * 256));
ret = regmap_update_bits(spdif->regmap, SPDIF_CFGR,
SPDIF_CFGR_CLK_DIV_MASK | SPDIF_CFGR_HALFWORD_ENABLE |
SDPIF_CFGR_VDW_MASK,
Say Y if you want to add audio support for various Snow
boards based on Exynos5 series of SoCs.
-config SND_SOC_ODROIDX2
- tristate "Audio support for Odroid-X2 and Odroid-U3"
- depends on SND_SOC_SAMSUNG && I2C
- select SND_SOC_MAX98090
- select SND_SAMSUNG_I2S
- help
- Say Y here to enable audio support for the Odroid-X2/U3.
-
config SND_SOC_ARNDALE_RT5631_ALC5631
tristate "Audio support for RT5631(ALC5631) on Arndale Board"
depends on SND_SOC_SAMSUNG && I2C
snd-soc-lowland-objs := lowland.o
snd-soc-littlemill-objs := littlemill.o
snd-soc-bells-objs := bells.o
-snd-soc-odroidx2-max98090-objs := odroidx2_max98090.o
snd-soc-arndale-rt5631-objs := arndale_rt5631.o
obj-$(CONFIG_SND_SOC_SAMSUNG_JIVE_WM8750) += snd-soc-jive-wm8750.o
obj-$(CONFIG_SND_SOC_LOWLAND) += snd-soc-lowland.o
obj-$(CONFIG_SND_SOC_LITTLEMILL) += snd-soc-littlemill.o
obj-$(CONFIG_SND_SOC_BELLS) += snd-soc-bells.o
-obj-$(CONFIG_SND_SOC_ODROIDX2) += snd-soc-odroidx2-max98090.o
obj-$(CONFIG_SND_SOC_ARNDALE_RT5631_ALC5631) += snd-soc-arndale-rt5631.o
goto err5;
ret = samsung_asoc_dma_platform_register(&pdev->dev,
- ac97_pdata->dma_filter);
+ ac97_pdata->dma_filter,
+ NULL, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
goto err5;
void samsung_asoc_init_dma_data(struct snd_soc_dai *dai,
struct s3c_dma_params *playback,
struct s3c_dma_params *capture);
-int samsung_asoc_dma_platform_register(struct device *dev,
- dma_filter_fn fn);
-
+/*
+ * The @tx and @rx arguments can be NULL if the DMA channel names are
+ * "tx" and "rx"; otherwise the actual DMA channel names must be passed
+ * to this function.
+ */
+int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter,
+ const char *tx, const char *rx);
#endif
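As a hedged usage sketch of the new signature (this caller fragment is hypothetical; the secondary I2S DAI later in this series passes "tx-sec" for real):

	/* DT-based driver whose playback channel is not named "tx" */
	ret = samsung_asoc_dma_platform_register(&pdev->dev, pdata->dma_filter,
						 "tx-sec", NULL);
	if (ret)
		return ret;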
#include "dma.h"
-static struct snd_dmaengine_pcm_config samsung_dmaengine_pcm_config = {
- .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
-};
-
void samsung_asoc_init_dma_data(struct snd_soc_dai *dai,
struct s3c_dma_params *playback,
struct s3c_dma_params *capture)
}
EXPORT_SYMBOL_GPL(samsung_asoc_init_dma_data);
-int samsung_asoc_dma_platform_register(struct device *dev,
- dma_filter_fn filter)
+int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter,
+ const char *tx, const char *rx)
{
- samsung_dmaengine_pcm_config.compat_filter_fn = filter;
+ unsigned int flags = SND_DMAENGINE_PCM_FLAG_COMPAT;
+
+ struct snd_dmaengine_pcm_config *pcm_conf;
+
+ pcm_conf = devm_kzalloc(dev, sizeof(*pcm_conf), GFP_KERNEL);
+ if (!pcm_conf)
+ return -ENOMEM;
+
+ pcm_conf->prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
+ pcm_conf->compat_filter_fn = filter;
+
+ if (dev->of_node) {
+ pcm_conf->chan_names[SNDRV_PCM_STREAM_PLAYBACK] = tx;
+ pcm_conf->chan_names[SNDRV_PCM_STREAM_CAPTURE] = rx;
+ } else {
+ flags |= SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME;
+ }
- return devm_snd_dmaengine_pcm_register(dev,
- &samsung_dmaengine_pcm_config,
- SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME |
- SND_DMAENGINE_PCM_FLAG_COMPAT);
+ return devm_snd_dmaengine_pcm_register(dev, pcm_conf, flags);
}
EXPORT_SYMBOL_GPL(samsung_asoc_dma_platform_register);
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
return i2s;
}
-static const struct of_device_id exynos_i2s_match[];
-
-static inline const struct samsung_i2s_dai_data *samsung_i2s_get_driver_data(
- struct platform_device *pdev)
+static void i2s_free_sec_dai(struct i2s_dai *i2s)
{
- if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_node(exynos_i2s_match, pdev->dev.of_node);
- return match ? match->data : NULL;
- } else {
- return (struct samsung_i2s_dai_data *)
- platform_get_device_id(pdev)->driver_data;
- }
+ platform_device_del(i2s->pdev);
}
#ifdef CONFIG_PM
const struct samsung_i2s_dai_data *i2s_dai_data;
int ret;
- /* Call during Seconday interface registration */
- i2s_dai_data = samsung_i2s_get_driver_data(pdev);
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node)
+ i2s_dai_data = of_device_get_match_data(&pdev->dev);
+ else
+ i2s_dai_data = (struct samsung_i2s_dai_data *)
+ platform_get_device_id(pdev)->driver_data;
+ /* Call during the secondary interface registration */
if (i2s_dai_data->dai_type == TYPE_SEC) {
sec_dai = dev_get_drvdata(&pdev->dev);
if (!sec_dai) {
return ret;
return samsung_asoc_dma_platform_register(&pdev->dev,
- sec_dai->filter);
+ sec_dai->filter, "tx-sec", NULL);
}
pri_dai = i2s_alloc_dai(pdev, false);
return -EINVAL;
}
- devm_snd_soc_register_component(&pri_dai->pdev->dev,
+ ret = devm_snd_soc_register_component(&pri_dai->pdev->dev,
&samsung_i2s_component,
&pri_dai->i2s_dai_drv, 1);
+ if (ret < 0)
+ goto err_free_dai;
+
+ ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter,
+ NULL, NULL);
+ if (ret < 0)
+ goto err_free_dai;
pm_runtime_enable(&pdev->dev);
- ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter);
- if (ret != 0)
- return ret;
+ ret = i2s_register_clock_provider(pdev);
+ if (!ret)
+ return 0;
- return i2s_register_clock_provider(pdev);
+ pm_runtime_disable(&pdev->dev);
+err_free_dai:
+ if (sec_dai)
+ i2s_free_sec_dai(sec_dai);
+ return ret;
}
static int samsung_i2s_remove(struct platform_device *pdev)
.i2s_variant_regs = &i2sv5_i2s1_regs,
};
-static const struct samsung_i2s_dai_data samsung_dai_type_pri = {
- .dai_type = TYPE_PRI,
-};
-
static const struct samsung_i2s_dai_data samsung_dai_type_sec = {
.dai_type = TYPE_SEC,
};
}, {
.name = "samsung-i2s-sec",
.driver_data = (kernel_ulong_t)&samsung_dai_type_sec,
- }, {
- .name = "samsung-i2sv4",
- .driver_data = (kernel_ulong_t)&i2sv5_dai_type,
},
{},
};
+++ /dev/null
-/*
- * Copyright (C) 2014 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/of.h>
-#include <linux/module.h>
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#include "i2s.h"
-
-struct odroidx2_drv_data {
- const struct snd_soc_dapm_widget *dapm_widgets;
- unsigned int num_dapm_widgets;
-};
-
-/* The I2S CDCLK output clock frequency for the MAX98090 codec */
-#define MAX98090_MCLK 19200000
-
-static struct snd_soc_dai_link odroidx2_dai[];
-
-static int odroidx2_late_probe(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd;
- struct snd_soc_dai *codec_dai;
- struct snd_soc_dai *cpu_dai;
- int ret;
-
- rtd = snd_soc_get_pcm_runtime(card, card->dai_link[0].name);
- codec_dai = rtd->codec_dai;
- cpu_dai = rtd->cpu_dai;
-
- ret = snd_soc_dai_set_sysclk(codec_dai, 0, MAX98090_MCLK,
- SND_SOC_CLOCK_IN);
-
- if (ret < 0 || of_find_property(odroidx2_dai[0].codec_of_node,
- "clocks", NULL))
- return ret;
-
- /* Set the cpu DAI configuration in order to use CDCLK */
- return snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_CDCLK,
- 0, SND_SOC_CLOCK_OUT);
-}
-
-static const struct snd_soc_dapm_widget odroidx2_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone Jack", NULL),
- SND_SOC_DAPM_MIC("Mic Jack", NULL),
- SND_SOC_DAPM_MIC("DMIC", NULL),
-};
-
-static const struct snd_soc_dapm_widget odroidu3_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone Jack", NULL),
- SND_SOC_DAPM_SPK("Speakers", NULL),
-};
-
-static struct snd_soc_dai_link odroidx2_dai[] = {
- {
- .name = "MAX98090",
- .stream_name = "MAX98090 PCM",
- .codec_dai_name = "HiFi",
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM,
- }
-};
-
-static struct snd_soc_card odroidx2 = {
- .owner = THIS_MODULE,
- .dai_link = odroidx2_dai,
- .num_links = ARRAY_SIZE(odroidx2_dai),
- .fully_routed = true,
- .late_probe = odroidx2_late_probe,
-};
-
-static const struct odroidx2_drv_data odroidx2_drvdata = {
- .dapm_widgets = odroidx2_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(odroidx2_dapm_widgets),
-};
-
-static const struct odroidx2_drv_data odroidu3_drvdata = {
- .dapm_widgets = odroidu3_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(odroidu3_dapm_widgets),
-};
-
-static const struct of_device_id odroidx2_audio_of_match[] = {
- {
- .compatible = "samsung,odroidx2-audio",
- .data = &odroidx2_drvdata,
- }, {
- .compatible = "samsung,odroidu3-audio",
- .data = &odroidu3_drvdata,
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, odroidx2_audio_of_match);
-
-static int odroidx2_audio_probe(struct platform_device *pdev)
-{
- struct device_node *snd_node = pdev->dev.of_node;
- struct snd_soc_card *card = &odroidx2;
- struct device_node *i2s_node, *codec_node;
- struct odroidx2_drv_data *dd;
- const struct of_device_id *of_id;
- int ret;
-
- of_id = of_match_node(odroidx2_audio_of_match, snd_node);
- dd = (struct odroidx2_drv_data *)of_id->data;
-
- card->num_dapm_widgets = dd->num_dapm_widgets;
- card->dapm_widgets = dd->dapm_widgets;
-
- card->dev = &pdev->dev;
-
- ret = snd_soc_of_parse_card_name(card, "samsung,model");
- if (ret < 0)
- return ret;
-
- ret = snd_soc_of_parse_audio_routing(card, "samsung,audio-routing");
- if (ret < 0)
- return ret;
-
- codec_node = of_parse_phandle(snd_node, "samsung,audio-codec", 0);
- if (!codec_node) {
- dev_err(&pdev->dev,
- "Failed parsing samsung,i2s-codec property\n");
- return -EINVAL;
- }
-
- i2s_node = of_parse_phandle(snd_node, "samsung,i2s-controller", 0);
- if (!i2s_node) {
- dev_err(&pdev->dev,
- "Failed parsing samsung,i2s-controller property\n");
- ret = -EINVAL;
- goto err_put_codec_n;
- }
-
- odroidx2_dai[0].codec_of_node = codec_node;
- odroidx2_dai[0].cpu_of_node = i2s_node;
- odroidx2_dai[0].platform_of_node = i2s_node;
-
- ret = snd_soc_register_card(card);
- if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
- ret);
- goto err_put_i2s_n;
- }
- return 0;
-
-err_put_i2s_n:
- of_node_put(i2s_node);
-err_put_codec_n:
- of_node_put(codec_node);
- return ret;
-}
-
-static int odroidx2_audio_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(card);
-
- of_node_put(odroidx2_dai[0].cpu_of_node);
- of_node_put(odroidx2_dai[0].codec_of_node);
-
- return 0;
-}
-
-static struct platform_driver odroidx2_audio_driver = {
- .driver = {
- .name = "odroidx2-audio",
- .of_match_table = odroidx2_audio_of_match,
- .pm = &snd_soc_pm_ops,
- },
- .probe = odroidx2_audio_probe,
- .remove = odroidx2_audio_remove,
-};
-module_platform_driver(odroidx2_audio_driver);
-
-MODULE_AUTHOR("Chen Zhen <zhen1.chen@samsung.com>");
-MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
-MODULE_DESCRIPTION("ALSA SoC Odroid X2/U3 Audio Support");
-MODULE_LICENSE("GPL v2");
goto err5;
}
- ret = samsung_asoc_dma_platform_register(&pdev->dev, filter);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
+ NULL, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
goto err5;
iismod &= ~S3C2412_IISMOD_SLAVE;
break;
default:
- pr_err("unknwon master/slave format\n");
+ pr_err("unknown master/slave format\n");
return -EINVAL;
}
}
ret = samsung_asoc_dma_platform_register(&pdev->dev,
- pdata->dma_filter);
+ pdata->dma_filter,
+ NULL, NULL);
if (ret)
pr_err("failed to register the DMA: %d\n", ret);
}
ret = samsung_asoc_dma_platform_register(&pdev->dev,
- pdata->dma_filter);
+ pdata->dma_filter,
+ NULL, NULL);
if (ret)
pr_err("failed to register the dma: %d\n", ret);
spdif->dma_playback = &spdif_stereo_out;
- ret = samsung_asoc_dma_platform_register(&pdev->dev, filter);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
+ NULL, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register DMA: %d\n", ret);
goto err4;
config SND_SOC_RSRC_CARD
tristate "Renesas Sampling Rate Convert Sound Card"
+ select SND_SIMPLE_CARD_UTILS
help
This option enables simple sound if you need sampling rate convert
struct clk *clkout[CLKOUTMAX];
struct clk_onecell_data onecell;
struct rsnd_mod mod;
+ u32 flags;
int rbga_rate_for_441khz; /* RBGA */
int rbgb_rate_for_48khz; /* RBGB */
};
+#define LRCLK_ASYNC (1 << 0)
+#define adg_mode_flags(adg) (adg->flags)
+
#define for_each_rsnd_clk(pos, adg, i) \
for (i = 0; \
(i < CLKMAX) && \
rsnd_adg_set_ssi_clk(ssi_mod, data);
+ if (!(adg_mode_flags(adg) & LRCLK_ASYNC)) {
+ struct rsnd_mod *adg_mod = rsnd_mod_get(adg);
+ u32 ckr = 0;
+
+ if (0 == (rate % 8000))
+ ckr = 0x80000000;
+
+ rsnd_mod_bset(adg_mod, SSICKR, 0x80000000, ckr);
+ }
+
dev_dbg(dev, "ADG: %s[%d] selects 0x%x for %d\n",
rsnd_mod_name(ssi_mod), rsnd_mod_id(ssi_mod),
data, rate);
}
}
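Summarizing what the two SSICKR hunks do (a behavioral note, not additional code): when LRCLK_ASYNC is not set, bit 31 is chosen per clock family, and widening the rsnd_mod_bset() mask below from 0x00FF0000 to 0x80FF0000 lets the clock-out path program that same bit.

	/* SSICKR bit 31 (sketch):
	 *   rate % 8000 == 0 -> ckr = 0x80000000 (48 kHz clock family)
	 *   otherwise        -> ckr = 0          (44.1 kHz clock family)
	 * skipped entirely when "clkout-lr-asynchronous" set LRCLK_ASYNC */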
- rsnd_mod_bset(adg_mod, SSICKR, 0x00FF0000, ckr);
+ rsnd_mod_bset(adg_mod, SSICKR, 0x80FF0000, ckr);
rsnd_mod_write(adg_mod, BRRA, rbga);
rsnd_mod_write(adg_mod, BRRB, rbgb);
{
struct rsnd_adg *adg;
struct device *dev = rsnd_priv_to_dev(priv);
+ struct device_node *np = dev->of_node;
adg = devm_kzalloc(dev, sizeof(*adg), GFP_KERNEL);
if (!adg) {
rsnd_adg_get_clkin(priv, adg);
rsnd_adg_get_clkout(priv, adg);
+ if (of_get_property(np, "clkout-lr-asynchronous", NULL))
+ adg->flags = LRCLK_ASYNC;
+
priv->adg = adg;
return 0;
*/
static int rsnd_gen2_probe(struct rsnd_priv *priv)
{
- const static struct rsnd_regmap_field_conf conf_ssiu[] = {
+ static const struct rsnd_regmap_field_conf conf_ssiu[] = {
RSND_GEN_S_REG(SSI_MODE0, 0x800),
RSND_GEN_S_REG(SSI_MODE1, 0x804),
RSND_GEN_S_REG(SSI_MODE2, 0x808),
RSND_GEN_M_REG(SSI_INT_ENABLE, 0x18, 0x80),
};
- const static struct rsnd_regmap_field_conf conf_scu[] = {
+ static const struct rsnd_regmap_field_conf conf_scu[] = {
RSND_GEN_M_REG(SRC_I_BUSIF_MODE,0x0, 0x20),
RSND_GEN_M_REG(SRC_O_BUSIF_MODE,0x4, 0x20),
RSND_GEN_M_REG(SRC_BUSIF_DALIGN,0x8, 0x20),
RSND_GEN_M_REG(DVC_VOL7R, 0xe44, 0x100),
RSND_GEN_M_REG(DVC_DVUER, 0xe48, 0x100),
};
- const static struct rsnd_regmap_field_conf conf_adg[] = {
+ static const struct rsnd_regmap_field_conf conf_adg[] = {
RSND_GEN_S_REG(BRRA, 0x00),
RSND_GEN_S_REG(BRRB, 0x04),
RSND_GEN_S_REG(SSICKR, 0x08),
RSND_GEN_S_REG(SRCOUT_TIMSEL4, 0x58),
RSND_GEN_S_REG(CMDOUT_TIMSEL, 0x5c),
};
- const static struct rsnd_regmap_field_conf conf_ssi[] = {
+ static const struct rsnd_regmap_field_conf conf_ssi[] = {
RSND_GEN_M_REG(SSICR, 0x00, 0x40),
RSND_GEN_M_REG(SSISR, 0x04, 0x40),
RSND_GEN_M_REG(SSITDR, 0x08, 0x40),
static int rsnd_gen1_probe(struct rsnd_priv *priv)
{
- const static struct rsnd_regmap_field_conf conf_adg[] = {
+ static const struct rsnd_regmap_field_conf conf_adg[] = {
RSND_GEN_S_REG(BRRA, 0x00),
RSND_GEN_S_REG(BRRB, 0x04),
RSND_GEN_S_REG(SSICKR, 0x08),
RSND_GEN_S_REG(AUDIO_CLK_SEL0, 0x0c),
RSND_GEN_S_REG(AUDIO_CLK_SEL1, 0x10),
};
- const static struct rsnd_regmap_field_conf conf_ssi[] = {
+ static const struct rsnd_regmap_field_conf conf_ssi[] = {
RSND_GEN_M_REG(SSICR, 0x00, 0x40),
RSND_GEN_M_REG(SSISR, 0x04, 0x40),
RSND_GEN_M_REG(SSITDR, 0x08, 0x40),
#include <sound/jack.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>
+#include <sound/simple_card_utils.h>
struct rsrc_card_of_data {
const char *prefix;
};
MODULE_DEVICE_TABLE(of, rsrc_card_of_match);
-#define DAI_NAME_NUM 32
-struct rsrc_card_dai {
- unsigned int sysclk;
- unsigned int tx_slot_mask;
- unsigned int rx_slot_mask;
- int slots;
- int slot_width;
- struct clk *clk;
- char dai_name[DAI_NAME_NUM];
-};
-
#define IDX_CPU 0
#define IDX_CODEC 1
struct rsrc_card_priv {
struct snd_soc_card snd_card;
struct snd_soc_codec_conf codec_conf;
- struct rsrc_card_dai *dai_props;
+ struct asoc_simple_dai *dai_props;
struct snd_soc_dai_link *dai_link;
- int dai_num;
u32 convert_rate;
u32 convert_channels;
};
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
- struct rsrc_card_dai *dai_props =
+ struct asoc_simple_dai *dai_props =
rsrc_priv_to_props(priv, rtd->num);
return clk_prepare_enable(dai_props->clk);
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
- struct rsrc_card_dai *dai_props =
+ struct asoc_simple_dai *dai_props =
rsrc_priv_to_props(priv, rtd->num);
clk_disable_unprepare(dai_props->clk);
struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_dai *dai;
struct snd_soc_dai_link *dai_link;
- struct rsrc_card_dai *dai_props;
+ struct asoc_simple_dai *dai_props;
int num = rtd->num;
int ret;
return 0;
}
-static int rsrc_card_parse_daifmt(struct device_node *node,
- struct device_node *codec,
- struct rsrc_card_priv *priv,
- struct snd_soc_dai_link *dai_link,
- unsigned int *retfmt)
-{
- struct device_node *bitclkmaster = NULL;
- struct device_node *framemaster = NULL;
- unsigned int daifmt;
-
- daifmt = snd_soc_of_parse_daifmt(node, NULL,
- &bitclkmaster, &framemaster);
- daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
-
- if (!bitclkmaster && !framemaster)
- return -EINVAL;
-
- if (codec == bitclkmaster)
- daifmt |= (codec == framemaster) ?
- SND_SOC_DAIFMT_CBM_CFM : SND_SOC_DAIFMT_CBM_CFS;
- else
- daifmt |= (codec == framemaster) ?
- SND_SOC_DAIFMT_CBS_CFM : SND_SOC_DAIFMT_CBS_CFS;
-
- of_node_put(bitclkmaster);
- of_node_put(framemaster);
-
- *retfmt = daifmt;
-
- return 0;
-}
-
static int rsrc_card_parse_links(struct device_node *np,
struct rsrc_card_priv *priv,
int idx, bool is_fe)
{
+ struct device *dev = rsrc_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = rsrc_priv_to_link(priv, idx);
- struct rsrc_card_dai *dai_props = rsrc_priv_to_props(priv, idx);
+ struct asoc_simple_dai *dai_props = rsrc_priv_to_props(priv, idx);
struct of_phandle_args args;
int ret;
if (ret < 0)
return ret;
- /* set dai_name */
- snprintf(dai_props->dai_name, DAI_NAME_NUM, "fe.%s",
- dai_link->cpu_dai_name);
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "fe.%s",
+ dai_link->cpu_dai_name);
+ if (ret < 0)
+ return ret;
/*
* In soc_bind_dai_link() will check cpu name after
if (!args.args_count)
dai_link->cpu_dai_name = NULL;
} else {
- struct device *dev = rsrc_priv_to_dev(priv);
const struct rsrc_card_of_data *of_data;
of_data = of_device_get_match_data(dev);
if (ret < 0)
return ret;
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "be.%s",
+ dai_link->codec_dai_name);
+ if (ret < 0)
+ return ret;
+
/* additional name prefix */
if (of_data) {
priv->codec_conf.of_node = dai_link->codec_of_node;
dai_link->codec_of_node,
"audio-prefix");
}
-
- /* set dai_name */
- snprintf(dai_props->dai_name, DAI_NAME_NUM, "be.%s",
- dai_link->codec_dai_name);
}
/* Simple Card assumes platform == cpu */
dai_link->platform_of_node = dai_link->cpu_of_node;
dai_link->dpcm_playback = 1;
dai_link->dpcm_capture = 1;
- dai_link->name = dai_props->dai_name;
- dai_link->stream_name = dai_props->dai_name;
dai_link->ops = &rsrc_card_ops;
dai_link->init = rsrc_card_dai_init;
int idx, bool is_fe)
{
struct snd_soc_dai_link *dai_link = rsrc_priv_to_link(priv, idx);
- struct rsrc_card_dai *dai_props = rsrc_priv_to_props(priv, idx);
+ struct asoc_simple_dai *dai_props = rsrc_priv_to_props(priv, idx);
struct clk *clk;
struct device_node *of_np = is_fe ? dai_link->cpu_of_node :
dai_link->codec_of_node;
{
struct device *dev = rsrc_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = rsrc_priv_to_link(priv, idx);
- struct rsrc_card_dai *dai_props = rsrc_priv_to_props(priv, idx);
+ struct asoc_simple_dai *dai_props = rsrc_priv_to_props(priv, idx);
int ret;
ret = rsrc_card_parse_links(np, priv, idx, is_fe);
return ret;
dev_dbg(dev, "\t%s / %04x / %d\n",
- dai_props->dai_name,
+ dai_link->name,
dai_link->dai_fmt,
dai_props->sysclk);
static int rsrc_card_dai_link_of(struct device_node *node,
struct rsrc_card_priv *priv)
{
+ struct device *dev = rsrc_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link;
struct device_node *np;
unsigned int daifmt = 0;
dai_link = rsrc_priv_to_link(priv, i);
if (strcmp(np->name, "codec") == 0) {
- ret = rsrc_card_parse_daifmt(node, np, priv,
- dai_link, &daifmt);
+ ret = asoc_simple_card_parse_daifmt(dev, node, np,
+ NULL, &daifmt);
if (ret < 0)
return ret;
break;
struct device *dev)
{
const struct rsrc_card_of_data *of_data = of_device_get_match_data(dev);
- struct rsrc_card_dai *props;
+ struct asoc_simple_dai *props;
struct snd_soc_dai_link *links;
int ret;
int num;
priv->dai_props = props;
priv->dai_link = links;
- priv->dai_num = num;
/* Init snd_soc_card */
priv->snd_card.owner = THIS_MODULE;
"audio-routing");
}
- /* Parse the card name from DT */
- snd_soc_of_parse_card_name(&priv->snd_card, "card-name");
-
/* sampling rate convert */
of_property_read_u32(node, "convert-rate", &priv->convert_rate);
if (ret < 0)
return ret;
- if (!priv->snd_card.name)
- priv->snd_card.name = priv->snd_card.dai_link->name;
+ ret = asoc_simple_card_parse_card_name(&priv->snd_card, "card-");
+ if (ret < 0)
+ return ret;
return 0;
}
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ int ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
if (platform->driver->compr_ops && platform->driver->compr_ops->pointer)
- platform->driver->compr_ops->pointer(cstream, tstamp);
+ ret = platform->driver->compr_ops->pointer(cstream, tstamp);
mutex_unlock(&rtd->pcm_mutex);
- return 0;
+ return ret;
}
static int soc_compr_copy(struct snd_compr_stream *cstream,
*/
static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
struct list_head *list, enum snd_soc_dapm_direction dir,
- int (*fn)(struct snd_soc_dapm_widget *, struct list_head *))
+ int (*fn)(struct snd_soc_dapm_widget *, struct list_head *,
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *,
+ enum snd_soc_dapm_direction)),
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *,
+ enum snd_soc_dapm_direction))
{
enum snd_soc_dapm_direction rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
struct snd_soc_dapm_path *path;
if (list)
list_add_tail(&widget->work_list, list);
+ if (custom_stop_condition && custom_stop_condition(widget, dir)) {
+ widget->endpoints[dir] = 1;
+ return widget->endpoints[dir];
+ }
+
if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
widget->endpoints[dir] = snd_soc_dapm_suspend_check(widget);
return widget->endpoints[dir];
if (path->connect) {
path->walking = 1;
- con += fn(path->node[dir], list);
+ con += fn(path->node[dir], list, custom_stop_condition);
path->walking = 0;
}
}
/*
* Recursively check for a completed path to an active or physically connected
* output widget. Returns number of complete paths.
+ *
+ * Optionally, can be supplied with a function acting as a stopping condition.
+ * This function takes the dapm widget currently being examined and the walk
+ * direction as arguments; it should return true if the walk should be
+ * stopped and false otherwise.
*/
static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
- struct list_head *list)
+ struct list_head *list,
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *i,
+ enum snd_soc_dapm_direction))
{
return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_OUT,
- is_connected_output_ep);
+ is_connected_output_ep, custom_stop_condition);
}
/*
* Recursively check for a completed path to an active or physically connected
* input widget. Returns number of complete paths.
+ *
+ * Optionally, can be supplied with a function acting as a stopping condition.
+ * This function takes the dapm widget currently being examined and the walk
+ * direction as an arguments, it should return true if the walk should be
+ * stopped and false otherwise.
*/
static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
- struct list_head *list)
+ struct list_head *list,
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *i,
+ enum snd_soc_dapm_direction))
{
return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_IN,
- is_connected_input_ep);
+ is_connected_input_ep, custom_stop_condition);
}
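A minimal sketch of a callback matching the new custom_stop_condition type (the predicate is illustrative, not part of this patch): stop the walk whenever a DAI widget is reached, in either direction.

	static bool stop_at_dai(struct snd_soc_dapm_widget *widget,
				enum snd_soc_dapm_direction dir)
	{
		return widget->id == snd_soc_dapm_dai_in ||
		       widget->id == snd_soc_dapm_dai_out;
	}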
/**
* @dai: the soc DAI.
* @stream: stream direction.
* @list: list of active widgets for this stream.
+ * @custom_stop_condition: (optional) a function meant to stop the widget graph
+ * walk based on custom logic.
*
 * Queries the DAPM graph as to whether a valid audio stream path exists for
* the initial stream specified by name. This takes into account
* current mixer and mux kcontrol settings. Creates list of valid widgets.
*
+ * Optionally, can be supplied with a function acting as a stopping condition.
+ * This function takes the dapm widget currently being examined and the walk
+ * direction as arguments; it should return true if the walk should be
+ * stopped and false otherwise.
+ *
* Returns the number of valid paths or negative error.
*/
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
- struct snd_soc_dapm_widget_list **list)
+ struct snd_soc_dapm_widget_list **list,
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *,
+ enum snd_soc_dapm_direction))
{
struct snd_soc_card *card = dai->component->card;
struct snd_soc_dapm_widget *w;
}
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- paths = is_connected_output_ep(dai->playback_widget, &widgets);
+ paths = is_connected_output_ep(dai->playback_widget, &widgets,
+ custom_stop_condition);
else
- paths = is_connected_input_ep(dai->capture_widget, &widgets);
+ paths = is_connected_input_ep(dai->capture_widget, &widgets,
+ custom_stop_condition);
/* Drop starting point */
list_del(widgets.next);
DAPM_UPDATE_STAT(w, power_checks);
- in = is_connected_input_ep(w, NULL);
- out = is_connected_output_ep(w, NULL);
+ in = is_connected_input_ep(w, NULL, NULL);
+ out = is_connected_output_ep(w, NULL, NULL);
return out != 0 && in != 0;
}
in = 0;
out = 0;
} else {
- in = is_connected_input_ep(w, NULL);
- out = is_connected_output_ep(w, NULL);
+ in = is_connected_input_ep(w, NULL, NULL);
+ out = is_connected_output_ep(w, NULL, NULL);
}
ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
mutex_unlock(&dapm->card->dapm_mutex);
return w;
}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_new_control);
struct snd_soc_dapm_widget *
snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
return 0;
}
+static bool dpcm_end_walk_at_be(struct snd_soc_dapm_widget *widget,
+ enum snd_soc_dapm_direction dir)
+{
+ struct snd_soc_card *card = widget->dapm->card;
+ struct snd_soc_pcm_runtime *rtd;
+ int i;
+
+ if (dir == SND_SOC_DAPM_DIR_OUT) {
+ list_for_each_entry(rtd, &card->rtd_list, list) {
+ if (!rtd->dai_link->no_pcm)
+ continue;
+
+ if (rtd->cpu_dai->playback_widget == widget)
+ return true;
+
+ for (i = 0; i < rtd->num_codecs; ++i) {
+ struct snd_soc_dai *dai = rtd->codec_dais[i];
+ if (dai->playback_widget == widget)
+ return true;
+ }
+ }
+ } else { /* SND_SOC_DAPM_DIR_IN */
+ list_for_each_entry(rtd, &card->rtd_list, list) {
+ if (!rtd->dai_link->no_pcm)
+ continue;
+
+ if (rtd->cpu_dai->capture_widget == widget)
+ return true;
+
+ for (i = 0; i < rtd->num_codecs; ++i) {
+ struct snd_soc_dai *dai = rtd->codec_dais[i];
+ if (dai->capture_widget == widget)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
int stream, struct snd_soc_dapm_widget_list **list)
{
int paths;
/* get number of valid DAI paths and their widgets */
- paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, list);
+ paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, list,
+ dpcm_end_walk_at_be);
dev_dbg(fe->dev, "ASoC: found %d audio %s paths\n", paths,
stream ? "capture" : "playback");
regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
- if (!regmap) {
+ if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "sti-audio-clk-glue syscf not found\n");
- return -EINVAL;
+ return PTR_ERR(regmap);
}
player->clk_sel = regmap_field_alloc(regmap, regfield[0]);
Select Y or M to add support for the Codec embedded in the Allwinner
A10 and affiliated SoCs.
+config SND_SUN4I_I2S
+ tristate "Allwinner A10 I2S Support"
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ select REGMAP_MMIO
+ help
+ Say Y or M if you want to add support for codecs attached to
+ the Allwinner A10 I2S. You will also need to select the
+ individual machine drivers to support below.
+
config SND_SUN4I_SPDIF
tristate "Allwinner A10 SPDIF Support"
depends on OF
obj-$(CONFIG_SND_SUN4I_CODEC) += sun4i-codec.o
-
+obj-$(CONFIG_SND_SUN4I_I2S) += sun4i-i2s.o
obj-$(CONFIG_SND_SUN4I_SPDIF) += sun4i-spdif.o
--- /dev/null
+/*
+ * Copyright (C) 2015 Andrea Venturi
+ * Andrea Venturi <be17068@iperbole.bo.it>
+ *
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#define SUN4I_I2S_CTRL_REG 0x00
+#define SUN4I_I2S_CTRL_SDO_EN_MASK GENMASK(11, 8)
+#define SUN4I_I2S_CTRL_SDO_EN(sdo) BIT(8 + (sdo))
+#define SUN4I_I2S_CTRL_MODE_MASK BIT(5)
+#define SUN4I_I2S_CTRL_MODE_SLAVE (1 << 5)
+#define SUN4I_I2S_CTRL_MODE_MASTER (0 << 5)
+#define SUN4I_I2S_CTRL_TX_EN BIT(2)
+#define SUN4I_I2S_CTRL_RX_EN BIT(1)
+#define SUN4I_I2S_CTRL_GL_EN BIT(0)
+
+#define SUN4I_I2S_FMT0_REG 0x04
+#define SUN4I_I2S_FMT0_LRCLK_POLARITY_MASK BIT(7)
+#define SUN4I_I2S_FMT0_LRCLK_POLARITY_INVERTED (1 << 7)
+#define SUN4I_I2S_FMT0_LRCLK_POLARITY_NORMAL (0 << 7)
+#define SUN4I_I2S_FMT0_BCLK_POLARITY_MASK BIT(6)
+#define SUN4I_I2S_FMT0_BCLK_POLARITY_INVERTED (1 << 6)
+#define SUN4I_I2S_FMT0_BCLK_POLARITY_NORMAL (0 << 6)
+#define SUN4I_I2S_FMT0_SR_MASK GENMASK(5, 4)
+#define SUN4I_I2S_FMT0_SR(sr) ((sr) << 4)
+#define SUN4I_I2S_FMT0_WSS_MASK GENMASK(3, 2)
+#define SUN4I_I2S_FMT0_WSS(wss) ((wss) << 2)
+#define SUN4I_I2S_FMT0_FMT_MASK GENMASK(1, 0)
+#define SUN4I_I2S_FMT0_FMT_RIGHT_J (2 << 0)
+#define SUN4I_I2S_FMT0_FMT_LEFT_J (1 << 0)
+#define SUN4I_I2S_FMT0_FMT_I2S (0 << 0)
+
+#define SUN4I_I2S_FMT1_REG 0x08
+#define SUN4I_I2S_FIFO_TX_REG 0x0c
+#define SUN4I_I2S_FIFO_RX_REG 0x10
+
+#define SUN4I_I2S_FIFO_CTRL_REG 0x14
+#define SUN4I_I2S_FIFO_CTRL_FLUSH_TX BIT(25)
+#define SUN4I_I2S_FIFO_CTRL_FLUSH_RX BIT(24)
+#define SUN4I_I2S_FIFO_CTRL_TX_MODE_MASK BIT(2)
+#define SUN4I_I2S_FIFO_CTRL_TX_MODE(mode) ((mode) << 2)
+#define SUN4I_I2S_FIFO_CTRL_RX_MODE_MASK GENMASK(1, 0)
+#define SUN4I_I2S_FIFO_CTRL_RX_MODE(mode) (mode)
+
+#define SUN4I_I2S_FIFO_STA_REG 0x18
+
+#define SUN4I_I2S_DMA_INT_CTRL_REG 0x1c
+#define SUN4I_I2S_DMA_INT_CTRL_TX_DRQ_EN BIT(7)
+#define SUN4I_I2S_DMA_INT_CTRL_RX_DRQ_EN BIT(3)
+
+#define SUN4I_I2S_INT_STA_REG 0x20
+
+#define SUN4I_I2S_CLK_DIV_REG 0x24
+#define SUN4I_I2S_CLK_DIV_MCLK_EN BIT(7)
+#define SUN4I_I2S_CLK_DIV_BCLK_MASK GENMASK(6, 4)
+#define SUN4I_I2S_CLK_DIV_BCLK(bclk) ((bclk) << 4)
+#define SUN4I_I2S_CLK_DIV_MCLK_MASK GENMASK(3, 0)
+#define SUN4I_I2S_CLK_DIV_MCLK(mclk) ((mclk) << 0)
+
+#define SUN4I_I2S_RX_CNT_REG 0x28
+#define SUN4I_I2S_TX_CNT_REG 0x2c
+
+#define SUN4I_I2S_TX_CHAN_SEL_REG 0x30
+#define SUN4I_I2S_TX_CHAN_SEL(num_chan) (((num_chan) - 1) << 0)
+
+#define SUN4I_I2S_TX_CHAN_MAP_REG 0x34
+#define SUN4I_I2S_TX_CHAN_MAP(chan, sample) ((sample) << (chan << 2))
+
+#define SUN4I_I2S_RX_CHAN_SEL_REG 0x38
+#define SUN4I_I2S_RX_CHAN_MAP_REG 0x3c
+
+struct sun4i_i2s {
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct regmap *regmap;
+
+ struct snd_dmaengine_dai_dma_data playback_dma_data;
+};
+
+struct sun4i_i2s_clk_div {
+ u8 div;
+ u8 val;
+};
+
+static const struct sun4i_i2s_clk_div sun4i_i2s_bclk_div[] = {
+ { .div = 2, .val = 0 },
+ { .div = 4, .val = 1 },
+ { .div = 6, .val = 2 },
+ { .div = 8, .val = 3 },
+ { .div = 12, .val = 4 },
+ { .div = 16, .val = 5 },
+};
+
+static const struct sun4i_i2s_clk_div sun4i_i2s_mclk_div[] = {
+ { .div = 1, .val = 0 },
+ { .div = 2, .val = 1 },
+ { .div = 4, .val = 2 },
+ { .div = 6, .val = 3 },
+ { .div = 8, .val = 4 },
+ { .div = 12, .val = 5 },
+ { .div = 16, .val = 6 },
+ { .div = 24, .val = 7 },
+};
+
+static int sun4i_i2s_get_bclk_div(struct sun4i_i2s *i2s,
+ unsigned int oversample_rate,
+ unsigned int word_size)
+{
+ int div = oversample_rate / word_size / 2;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_i2s_bclk_div); i++) {
+ const struct sun4i_i2s_clk_div *bdiv = &sun4i_i2s_bclk_div[i];
+
+ if (bdiv->div == div)
+ return bdiv->val;
+ }
+
+ return -EINVAL;
+}
+
+static int sun4i_i2s_get_mclk_div(struct sun4i_i2s *i2s,
+ unsigned int oversample_rate,
+ unsigned int module_rate,
+ unsigned int sampling_rate)
+{
+ int div = module_rate / sampling_rate / oversample_rate;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_i2s_mclk_div); i++) {
+ const struct sun4i_i2s_clk_div *mdiv = &sun4i_i2s_mclk_div[i];
+
+ if (mdiv->div == div)
+ return mdiv->val;
+ }
+
+ return -EINVAL;
+}
+
+static int sun4i_i2s_oversample_rates[] = { 128, 192, 256, 384, 512, 768 };
+
+static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
+ unsigned int rate,
+ unsigned int word_size)
+{
+ unsigned int clk_rate;
+ int bclk_div, mclk_div;
+ int ret, i;
+
+ switch (rate) {
+ case 176400:
+ case 88200:
+ case 44100:
+ case 22050:
+ case 11025:
+ clk_rate = 22579200;
+ break;
+
+ case 192000:
+ case 128000:
+ case 96000:
+ case 64000:
+ case 48000:
+ case 32000:
+ case 24000:
+ case 16000:
+ case 12000:
+ case 8000:
+ clk_rate = 24576000;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = clk_set_rate(i2s->mod_clk, clk_rate);
+ if (ret)
+ return ret;
+
+ /* Always favor the highest oversampling rate */
+ for (i = (ARRAY_SIZE(sun4i_i2s_oversample_rates) - 1); i >= 0; i--) {
+ unsigned int oversample_rate = sun4i_i2s_oversample_rates[i];
+
+ bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
+ word_size);
+ mclk_div = sun4i_i2s_get_mclk_div(i2s, oversample_rate,
+ clk_rate,
+ rate);
+
+ if ((bclk_div >= 0) && (mclk_div >= 0))
+ break;
+ }
+
+ if ((bclk_div < 0) || (mclk_div < 0))
+ return -EINVAL;
+
+ regmap_write(i2s->regmap, SUN4I_I2S_CLK_DIV_REG,
+ SUN4I_I2S_CLK_DIV_BCLK(bclk_div) |
+ SUN4I_I2S_CLK_DIV_MCLK(mclk_div) |
+ SUN4I_I2S_CLK_DIV_MCLK_EN);
+
+ return 0;
+}
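A worked example of the divider search, using the tables as given: at 48 kHz with 16-bit words the mod clock is set to 24576000 Hz; 768x oversampling is rejected (768 / 16 / 2 = 24 has no entry in sun4i_i2s_bclk_div), so the loop settles on 512x.

	/* oversample_rate = 512 */
	bclk_div = 512 / 16 / 2;		/* = 16 -> register value 5 */
	mclk_div = 24576000 / 48000 / 512;	/* = 1  -> register value 0 */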
+
+static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ int sr, wss;
+ u32 width;
+
+ if (params_channels(params) != 2)
+ return -EINVAL;
+
+ switch (params_physical_width(params)) {
+ case 16:
+ width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ i2s->playback_dma_data.addr_width = width;
+
+ switch (params_width(params)) {
+ case 16:
+ sr = 0;
+ wss = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+ SUN4I_I2S_FMT0_WSS_MASK | SUN4I_I2S_FMT0_SR_MASK,
+ SUN4I_I2S_FMT0_WSS(wss) | SUN4I_I2S_FMT0_SR(sr));
+
+ return sun4i_i2s_set_clk_rate(i2s, params_rate(params),
+ params_width(params));
+}
+
+static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ u32 val;
+
+ /* DAI Mode */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ val = SUN4I_I2S_FMT0_FMT_I2S;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ val = SUN4I_I2S_FMT0_FMT_LEFT_J;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ val = SUN4I_I2S_FMT0_FMT_RIGHT_J;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+ SUN4I_I2S_FMT0_FMT_MASK,
+ val);
+
+ /* DAI clock polarity */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_IB_IF:
+ /* Invert both clocks */
+ val = SUN4I_I2S_FMT0_BCLK_POLARITY_INVERTED |
+ SUN4I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ /* Invert bit clock */
+ val = SUN4I_I2S_FMT0_BCLK_POLARITY_INVERTED |
+ SUN4I_I2S_FMT0_LRCLK_POLARITY_NORMAL;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ /* Invert frame clock */
+ val = SUN4I_I2S_FMT0_LRCLK_POLARITY_INVERTED |
+ SUN4I_I2S_FMT0_BCLK_POLARITY_NORMAL;
+ break;
+ case SND_SOC_DAIFMT_NB_NF:
+ /* Nothing to do for both normal cases */
+ val = SUN4I_I2S_FMT0_BCLK_POLARITY_NORMAL |
+ SUN4I_I2S_FMT0_LRCLK_POLARITY_NORMAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+ SUN4I_I2S_FMT0_BCLK_POLARITY_MASK |
+ SUN4I_I2S_FMT0_LRCLK_POLARITY_MASK,
+ val);
+
+ /* DAI clock master masks */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ /* BCLK and LRCLK master */
+ val = SUN4I_I2S_CTRL_MODE_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ /* BCLK and LRCLK slave */
+ val = SUN4I_I2S_CTRL_MODE_SLAVE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_MODE_MASK,
+ val);
+
+ /* Set significant bits in our FIFOs */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FIFO_CTRL_REG,
+ SUN4I_I2S_FIFO_CTRL_TX_MODE_MASK |
+ SUN4I_I2S_FIFO_CTRL_RX_MODE_MASK,
+ SUN4I_I2S_FIFO_CTRL_TX_MODE(1) |
+ SUN4I_I2S_FIFO_CTRL_RX_MODE(1));
+ return 0;
+}
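For reference, a hedged machine-driver fragment requesting a combination this callback accepts (the link itself is hypothetical): I2S framing, normal clock polarity, and the controller as bit and frame clock master.

	static struct snd_soc_dai_link sun4i_demo_link = {
		/* hypothetical link: codec is clock slave (CBS_CFS) */
		.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
			   SND_SOC_DAIFMT_CBS_CFS,
	};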
+
+static void sun4i_i2s_start_playback(struct sun4i_i2s *i2s)
+{
+ /* Flush TX FIFO */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FIFO_CTRL_REG,
+ SUN4I_I2S_FIFO_CTRL_FLUSH_TX,
+ SUN4I_I2S_FIFO_CTRL_FLUSH_TX);
+
+ /* Clear TX counter */
+ regmap_write(i2s->regmap, SUN4I_I2S_TX_CNT_REG, 0);
+
+ /* Enable TX Block */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_TX_EN,
+ SUN4I_I2S_CTRL_TX_EN);
+
+ /* Enable TX DRQ */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_DMA_INT_CTRL_REG,
+ SUN4I_I2S_DMA_INT_CTRL_TX_DRQ_EN,
+ SUN4I_I2S_DMA_INT_CTRL_TX_DRQ_EN);
+}
+
+static void sun4i_i2s_stop_playback(struct sun4i_i2s *i2s)
+{
+ /* Disable TX Block */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_TX_EN,
+ 0);
+
+ /* Disable TX DRQ */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_DMA_INT_CTRL_REG,
+ SUN4I_I2S_DMA_INT_CTRL_TX_DRQ_EN,
+ 0);
+}
+
+static int sun4i_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ sun4i_i2s_start_playback(i2s);
+ else
+ return -EINVAL;
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ sun4i_i2s_stop_playback(i2s);
+ else
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sun4i_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ /* Enable the whole hardware block */
+ regmap_write(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_GL_EN);
+
+ /* Enable the first output line */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_SDO_EN_MASK,
+ SUN4I_I2S_CTRL_SDO_EN(0));
+
+ /* Enable the first two channels */
+ regmap_write(i2s->regmap, SUN4I_I2S_TX_CHAN_SEL_REG,
+ SUN4I_I2S_TX_CHAN_SEL(2));
+
+	/* Map them to the first two samples coming in */
+ regmap_write(i2s->regmap, SUN4I_I2S_TX_CHAN_MAP_REG,
+ SUN4I_I2S_TX_CHAN_MAP(0, 0) | SUN4I_I2S_TX_CHAN_MAP(1, 1));
+
+ return clk_prepare_enable(i2s->mod_clk);
+}
+
+static void sun4i_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ clk_disable_unprepare(i2s->mod_clk);
+
+ /* Disable our output lines */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_SDO_EN_MASK, 0);
+
+ /* Disable the whole hardware block */
+ regmap_write(i2s->regmap, SUN4I_I2S_CTRL_REG, 0);
+}
+
+static const struct snd_soc_dai_ops sun4i_i2s_dai_ops = {
+ .hw_params = sun4i_i2s_hw_params,
+ .set_fmt = sun4i_i2s_set_fmt,
+ .shutdown = sun4i_i2s_shutdown,
+ .startup = sun4i_i2s_startup,
+ .trigger = sun4i_i2s_trigger,
+};
+
+static int sun4i_i2s_dai_probe(struct snd_soc_dai *dai)
+{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ snd_soc_dai_init_dma_data(dai, &i2s->playback_dma_data, NULL);
+
+ snd_soc_dai_set_drvdata(dai, i2s);
+
+ return 0;
+}
+
+static struct snd_soc_dai_driver sun4i_i2s_dai = {
+ .probe = sun4i_i2s_dai_probe,
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &sun4i_i2s_dai_ops,
+ .symmetric_rates = 1,
+};
+
+static const struct snd_soc_component_driver sun4i_i2s_component = {
+ .name = "sun4i-dai",
+};
+
+static bool sun4i_i2s_rd_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SUN4I_I2S_FIFO_TX_REG:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
+static bool sun4i_i2s_wr_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SUN4I_I2S_FIFO_RX_REG:
+ case SUN4I_I2S_FIFO_STA_REG:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
+static bool sun4i_i2s_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SUN4I_I2S_FIFO_RX_REG:
+ case SUN4I_I2S_INT_STA_REG:
+ case SUN4I_I2S_RX_CNT_REG:
+ case SUN4I_I2S_TX_CNT_REG:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static const struct reg_default sun4i_i2s_reg_defaults[] = {
+ { SUN4I_I2S_CTRL_REG, 0x00000000 },
+ { SUN4I_I2S_FMT0_REG, 0x0000000c },
+ { SUN4I_I2S_FMT1_REG, 0x00004020 },
+ { SUN4I_I2S_FIFO_CTRL_REG, 0x000400f0 },
+ { SUN4I_I2S_DMA_INT_CTRL_REG, 0x00000000 },
+ { SUN4I_I2S_CLK_DIV_REG, 0x00000000 },
+ { SUN4I_I2S_TX_CHAN_SEL_REG, 0x00000001 },
+ { SUN4I_I2S_TX_CHAN_MAP_REG, 0x76543210 },
+ { SUN4I_I2S_RX_CHAN_SEL_REG, 0x00000001 },
+ { SUN4I_I2S_RX_CHAN_MAP_REG, 0x00003210 },
+};
+
+static const struct regmap_config sun4i_i2s_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = SUN4I_I2S_RX_CHAN_MAP_REG,
+
+ .cache_type = REGCACHE_FLAT,
+ .reg_defaults = sun4i_i2s_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(sun4i_i2s_reg_defaults),
+ .writeable_reg = sun4i_i2s_wr_reg,
+ .readable_reg = sun4i_i2s_rd_reg,
+ .volatile_reg = sun4i_i2s_volatile_reg,
+};
+
+static int sun4i_i2s_runtime_resume(struct device *dev)
+{
+ struct sun4i_i2s *i2s = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(i2s->bus_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable bus clock\n");
+ return ret;
+ }
+
+ regcache_cache_only(i2s->regmap, false);
+ regcache_mark_dirty(i2s->regmap);
+
+ ret = regcache_sync(i2s->regmap);
+ if (ret) {
+ dev_err(dev, "Failed to sync regmap cache\n");
+ goto err_disable_clk;
+ }
+
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(i2s->bus_clk);
+ return ret;
+}
+
+static int sun4i_i2s_runtime_suspend(struct device *dev)
+{
+ struct sun4i_i2s *i2s = dev_get_drvdata(dev);
+
+ regcache_cache_only(i2s->regmap, true);
+
+ clk_disable_unprepare(i2s->bus_clk);
+
+ return 0;
+}
+
+static int sun4i_i2s_probe(struct platform_device *pdev)
+{
+ struct sun4i_i2s *i2s;
+ struct resource *res;
+ void __iomem *regs;
+ int irq, ret;
+
+ i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
+ if (!i2s)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, i2s);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Can't retrieve our interrupt\n");
+ return irq;
+ }
+
+ i2s->bus_clk = devm_clk_get(&pdev->dev, "apb");
+ if (IS_ERR(i2s->bus_clk)) {
+ dev_err(&pdev->dev, "Can't get our bus clock\n");
+ return PTR_ERR(i2s->bus_clk);
+ }
+
+ i2s->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
+ &sun4i_i2s_regmap_config);
+ if (IS_ERR(i2s->regmap)) {
+ dev_err(&pdev->dev, "Regmap initialisation failed\n");
+ return PTR_ERR(i2s->regmap);
+ }
+
+ i2s->mod_clk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(i2s->mod_clk)) {
+ dev_err(&pdev->dev, "Can't get our mod clock\n");
+ return PTR_ERR(i2s->mod_clk);
+ }
+
+ i2s->playback_dma_data.addr = res->start + SUN4I_I2S_FIFO_TX_REG;
+ i2s->playback_dma_data.maxburst = 4;
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = sun4i_i2s_runtime_resume(&pdev->dev);
+ if (ret)
+ goto err_pm_disable;
+ }
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &sun4i_i2s_component,
+ &sun4i_i2s_dai, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register DAI\n");
+ goto err_suspend;
+ }
+
+ ret = snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register PCM\n");
+ goto err_suspend;
+ }
+
+ return 0;
+
+err_suspend:
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ sun4i_i2s_runtime_suspend(&pdev->dev);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int sun4i_i2s_remove(struct platform_device *pdev)
+{
+ snd_dmaengine_pcm_unregister(&pdev->dev);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ sun4i_i2s_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_i2s_match[] = {
+ { .compatible = "allwinner,sun4i-a10-i2s", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun4i_i2s_match);
+
+static const struct dev_pm_ops sun4i_i2s_pm_ops = {
+ .runtime_resume = sun4i_i2s_runtime_resume,
+ .runtime_suspend = sun4i_i2s_runtime_suspend,
+};
+
+static struct platform_driver sun4i_i2s_driver = {
+ .probe = sun4i_i2s_probe,
+ .remove = sun4i_i2s_remove,
+ .driver = {
+ .name = "sun4i-i2s",
+ .of_match_table = sun4i_i2s_match,
+ .pm = &sun4i_i2s_pm_ops,
+ },
+};
+module_platform_driver(sun4i_i2s_driver);
+
+MODULE_AUTHOR("Andrea Venturi <be17068@iperbole.bo.it>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A10 I2S driver");
+MODULE_LICENSE("GPL");
goto __error;
}
chip = usb_chip[i];
- dev_set_drvdata(&dev->dev, chip);
atomic_inc(&chip->active); /* avoid autopm */
break;
}
goto __error;
}
}
+ dev_set_drvdata(&dev->dev, chip);
/*
* For devices with more than one control interface, we assume the
/* check for STACK_FRAME_NON_STANDARD */
if (file->whitelist && file->whitelist->rela)
- list_for_each_entry(rela, &file->whitelist->rela->rela_list, list)
- if (rela->sym->sec == func->sec &&
+ list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
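+ /* The reloc may reference the section+offset or the function symbol itself. */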
+ if (rela->sym->type == STT_SECTION &&
+ rela->sym->sec == func->sec &&
rela->addend == func->offset)
return true;
+ if (rela->sym->type == STT_FUNC && rela->sym == func)
+ return true;
+ }
/* check if it has a context switching instruction */
func_for_each_insn(file, func, insn)
return (value_int & value_mask) | ~value_mask;
}
+static int string_set_value(struct bt_ctf_field *field, const char *string)
+{
+ char *buffer = NULL;
+ size_t len = strlen(string), i, p;
+ int err;
+
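+ /*
+ * Copy lazily: allocate a buffer only once an unprintable byte is
+ * seen; each such byte expands to a four-character "\xNN" escape.
+ */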
+ for (i = p = 0; i < len; i++, p++) {
+ if (isprint(string[i])) {
+ if (!buffer)
+ continue;
+ buffer[p] = string[i];
+ } else {
+ char numstr[5];
+
+ snprintf(numstr, sizeof(numstr), "\\x%02x",
+ (unsigned int)(string[i]) & 0xff);
+
+ if (!buffer) {
+ buffer = zalloc(i + (len - i) * 4 + 2);
+ if (!buffer) {
+ pr_err("failed to set unprintable string '%s'\n", string);
+ return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
+ }
+ if (i > 0)
+ strncpy(buffer, string, i);
+ }
+ strncat(buffer + p, numstr, 4);
+ p += 3;
+ }
+ }
+
+ if (!buffer)
+ return bt_ctf_field_string_set_value(field, string);
+ err = bt_ctf_field_string_set_value(field, buffer);
+ free(buffer);
+ return err;
+}
+
static int add_tracepoint_field_value(struct ctf_writer *cw,
struct bt_ctf_event_class *event_class,
struct bt_ctf_event *event,
}
if (flags & FIELD_IS_STRING)
- ret = bt_ctf_field_string_set_value(field,
- data + offset + i * len);
+ ret = string_set_value(field, data + offset + i * len);
else {
unsigned long long value_int;
int err;
union perf_event *event;
+ if (symbol_conf.kptr_restrict)
+ return -1;
if (map == NULL)
return -1;
static bool symbol__read_kptr_restrict(void)
{
bool value = false;
+ FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
- if (geteuid() != 0) {
- FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
- if (fp != NULL) {
- char line[8];
+ if (fp != NULL) {
+ char line[8];
- if (fgets(line, sizeof(line), fp) != NULL)
- value = atoi(line) != 0;
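+ /*
+ * For non-root users any non-zero kptr_restrict hides kernel
+ * addresses; for root only level 2 does.
+ */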
+ if (fgets(line, sizeof(line), fp) != NULL)
+ value = (geteuid() != 0) ?
+ (atoi(line) != 0) :
+ (atoi(line) == 2);
- fclose(fp);
- }
+ fclose(fp);
}
return value;
NODE_TAGGED = 2,
};
-#define THRASH_SIZE 1000 * 1000
+#define THRASH_SIZE (1000 * 1000)
#define N 127
#define BATCH 33
exit_unsupported
fi
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
echo "hist trigger is not supported"
exit_unsupported
fi
+reset_tracer
+do_reset
+
echo "Test histogram with execname modifier"
echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger
exit_unsupported
fi
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
echo "hist trigger is not supported"
exit_unsupported
fi
+reset_tracer
+do_reset
+
echo "Test histogram basic tigger"
echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
exit_unsupported
fi
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
echo "hist trigger is not supported"
exit_unsupported
fi
+reset_tracer
+do_reset
+
reset_trigger
echo "Test histogram multiple tiggers"
memset(&attr, 0, sizeof(attr));
attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
attr.insn_cnt = ARRAY_SIZE(prog);
- attr.insns = (uint64_t)prog;
- attr.license = (uint64_t)bpf_license;
- attr.log_buf = (uint64_t)bpf_log_buf;
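+ /*
+ * Cast through unsigned long so the conversion is valid on both
+ * 32-bit and 64-bit builds before widening to the u64 attr fields.
+ */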
+ attr.insns = (unsigned long) &prog;
+ attr.license = (unsigned long) &bpf_license;
+ attr.log_buf = (unsigned long) &bpf_log_buf;
attr.log_size = sizeof(bpf_log_buf);
attr.log_level = 1;
attr.kern_version = 0;
memset(&eprog, 0, sizeof(eprog));
eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
eprog.insn_cnt = ARRAY_SIZE(ecode);
- eprog.insns = (uint64_t)ecode;
- eprog.license = (uint64_t)bpf_license;
+ eprog.insns = (unsigned long) &ecode;
+ eprog.license = (unsigned long) &bpf_license;
eprog.kern_version = 0;
memset(&cprog, 0, sizeof(cprog));
printf("No of huge pages allocated = %d\n",
(atoi(nr_hugepages)));
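+ /* Restore only the saved digits; sizeof() would count the whole buffer. */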
- if (write(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages))
+ if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
!= strlen(initial_nr_hugepages)) {
perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
goto close_fd;
all:
-all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder
+all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder noring
CFLAGS += -Wall
CFLAGS += -pthread -O2 -ggdb
virtio_ring_0_9: virtio_ring_0_9.o main.o
virtio_ring_poll: virtio_ring_poll.o main.o
virtio_ring_inorder: virtio_ring_inorder.o main.o
+noring: noring.o main.o
clean:
-rm main.o
-rm ring.o ring
-rm virtio_ring_0_9.o virtio_ring_0_9
-rm virtio_ring_poll.o virtio_ring_poll
-rm virtio_ring_inorder.o virtio_ring_inorder
+ -rm noring.o noring
.PHONY: all clean
Partial implementations of various ring layouts, useful for tuning the
virtio design. Uses shared memory heavily.
+
+Typical use:
+
+# sh run-on-all.sh perf stat -r 10 --log-fd 1 -- ./ring
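+
+The noring variant is a stub that performs no ring operations at all;
+running it under the same harness gives a rough baseline for the
+measurement overhead (a sketch, reusing the invocation above):
+
+# sh run-on-all.sh perf stat -r 10 --log-fd 1 -- ./noring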
--- /dev/null
+#define _GNU_SOURCE
+#include "main.h"
+#include <assert.h>
+
+/* stub implementation: useful for measuring overhead */
+void alloc_ring(void)
+{
+}
+
+/* guest side */
+int add_inbuf(unsigned len, void *buf, void *datap)
+{
+ return 0;
+}
+
+/*
+ * The skb_array API provides no way for the producer to find out whether a
+ * given buffer was consumed. Our tests merely require that a successful
+ * get_buf implies that add_inbuf succeeded in the past and that add_inbuf
+ * will succeed, so fake it accordingly.
+ */
+void *get_buf(unsigned *lenp, void **bufp)
+{
+ return "Buffer";
+}
+
+void poll_used(void)
+{
+}
+
+void disable_call()
+{
+ assert(0);
+}
+
+bool enable_call()
+{
+ assert(0);
+ /* not reached with asserts enabled; keep the compiler happy */
+ return false;
+}
+
+void kick_available(void)
+{
+ assert(0);
+}
+
+/* host side */
+void disable_kick()
+{
+ assert(0);
+}
+
+bool enable_kick()
+{
+ assert(0);
+ /* not reached with asserts enabled; keep the compiler happy */
+ return false;
+}
+
+void poll_avail(void)
+{
+}
+
+bool use_buf(unsigned *lenp, void **bufp)
+{
+ return true;
+}
+
+void call_used(void)
+{
+ assert(0);
+}
#Use the last CPU for the host. Why not the first?
#Many devices tend to use cpu0 by default, so
#it tends to be busier.
-HOST_AFFINITY=$(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n|tail -1)
+HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
#run command on all cpus
-for cpu in $(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n);
+for cpu in $(seq 0 $HOST_AFFINITY)
do
#Don't run guest and host on same CPU
#It actually works ok if using signalling
s->deactivate_to_head + s->deactivate_to_tail + s->deactivate_bypass;
if (total) {
- printf("\nSlab Deactivation Ocurrences %%\n");
+ printf("\nSlab Deactivation Occurrences %%\n");
printf("-------------------------------------------------\n");
printf("Slab full %7lu %3lu%%\n",
s->deactivate_full, (s->deactivate_full * 100) / total);
s->alloc_node_mismatch, (s->alloc_node_mismatch * 100) / total);
}
- if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail)
+ if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) {
printf("\nCmpxchg_double Looping\n------------------------\n");
printf("Locked Cmpxchg Double redos %lu\nUnlocked Cmpxchg Double redos %lu\n",
s->cmpxchg_double_fail, s->cmpxchg_double_cpu_fail);
+ }
}
static void report(struct slabinfo *s)
if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
continue;
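+ /*
+ * An LR flagged empty in ELRSR has no state to save; just clear
+ * the stale state bits in the software copy.
+ */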
- if (cpu_if->vgic_elrsr & (1UL << i)) {
+ if (cpu_if->vgic_elrsr & (1UL << i))
cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
- continue;
- }
+ else
+ cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
- cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
writel_relaxed(0, base + GICH_LR0 + (i * 4));
}
}
* other thread sync back the IRQ.
*/
while (irq->vcpu && /* IRQ may have state in an LR somewhere */
- irq->vcpu->cpu != -1) { /* VCPU thread is running */
- BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS);
+ irq->vcpu->cpu != -1) /* VCPU thread is running */
cond_resched_lock(&irq->irq_lock);
- }
irq->active = new_active_state;
if (new_active_state)
}
}
- /* Clear soft pending state when level IRQs have been acked */
- if (irq->config == VGIC_CONFIG_LEVEL &&
- !(val & GICH_LR_PENDING_BIT)) {
- irq->soft_pending = false;
- irq->pending = irq->line_level;
+ /*
+ * Clear soft pending state when level irqs have been acked.
+ * Always regenerate the pending state.
+ */
+ if (irq->config == VGIC_CONFIG_LEVEL) {
+ if (!(val & GICH_LR_PENDING_BIT))
+ irq->soft_pending = false;
+
+ irq->pending = irq->line_level || irq->soft_pending;
}
spin_unlock(&irq->irq_lock);
}
}
- /* Clear soft pending state when level irqs have been acked */
- if (irq->config == VGIC_CONFIG_LEVEL &&
- !(val & ICH_LR_PENDING_BIT)) {
- irq->soft_pending = false;
- irq->pending = irq->line_level;
+ /*
+ * Clear soft pending state when level irqs have been acked.
+ * Always regenerate the pending state.
+ */
+ if (irq->config == VGIC_CONFIG_LEVEL) {
+ if (!(val & ICH_LR_PENDING_BIT))
+ irq->soft_pending = false;
+
+ irq->pending = irq->line_level || irq->soft_pending;
}
spin_unlock(&irq->irq_lock);
irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
lockdep_is_held(&kvm->irq_lock));
- if (gsi < irq_rt->nr_rt_entries) {
+ if (irq_rt && gsi < irq_rt->nr_rt_entries) {
hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
entries[n] = *e;
++n;
case KVM_SET_GSI_ROUTING: {
struct kvm_irq_routing routing;
struct kvm_irq_routing __user *urouting;
- struct kvm_irq_routing_entry *entries;
+ struct kvm_irq_routing_entry *entries = NULL;
r = -EFAULT;
if (copy_from_user(&routing, argp, sizeof(routing)))
goto out;
r = -EINVAL;
- if (routing.nr >= KVM_MAX_IRQ_ROUTES)
+ if (routing.nr > KVM_MAX_IRQ_ROUTES)
goto out;
if (routing.flags)
goto out;
- r = -ENOMEM;
- entries = vmalloc(routing.nr * sizeof(*entries));
- if (!entries)
- goto out;
- r = -EFAULT;
- urouting = argp;
- if (copy_from_user(entries, urouting->entries,
- routing.nr * sizeof(*entries)))
- goto out_free_irq_routing;
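+ /* routing.nr == 0 is valid and simply installs an empty routing table. */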
+ if (routing.nr) {
+ r = -ENOMEM;
+ entries = vmalloc(routing.nr * sizeof(*entries));
+ if (!entries)
+ goto out;
+ r = -EFAULT;
+ urouting = argp;
+ if (copy_from_user(entries, urouting->entries,
+ routing.nr * sizeof(*entries)))
+ goto out_free_irq_routing;
+ }
r = kvm_set_irq_routing(kvm, entries, routing.nr,
routing.flags);
out_free_irq_routing: