]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
Merge tag 'v4.20-rc6' into for-4.21/block
authorJens Axboe <axboe@kernel.dk>
Mon, 10 Dec 2018 00:45:40 +0000 (17:45 -0700)
committerJens Axboe <axboe@kernel.dk>
Mon, 10 Dec 2018 00:45:40 +0000 (17:45 -0700)
Pull in v4.20-rc6 to resolve the conflict in NVMe, but also to get the
two corruption fixes. We're going to be overhauling the direct dispatch
path, and we need to do that on top of the changes we made for that
in mainline.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
294 files changed:
Documentation/ABI/testing/sysfs-class-net-dsa
Documentation/devicetree/bindings/clock/clock-bindings.txt
Documentation/devicetree/bindings/input/input-reset.txt
Documentation/devicetree/bindings/media/rockchip-vpu.txt [deleted file]
Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst
Documentation/media/uapi/mediactl/media-request-ioc-queue.rst
Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst
Documentation/media/uapi/mediactl/request-api.rst
Documentation/media/uapi/mediactl/request-func-close.rst
Documentation/media/uapi/mediactl/request-func-ioctl.rst
Documentation/media/uapi/mediactl/request-func-poll.rst
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/hsdk.dts
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/hsdk_defconfig
arch/arc/configs/nps_defconfig
arch/arc/configs/nsim_700_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/configs/tb10x_defconfig
arch/arc/configs/vdk_hs38_defconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/cache.h
arch/arc/include/asm/io.h
arch/arc/kernel/setup.c
arch/arc/mm/cache.c
arch/arc/mm/fault.c
arch/arm/mm/cache-v7.S
arch/arm/mm/cache-v7m.S
arch/arm/mm/dma-mapping.c
arch/arm/mm/proc-macros.S
arch/arm/probes/kprobes/opt-arm.c
arch/arm64/boot/dts/qcom/sdm845-mtp.dts
arch/arm64/kernel/hibernate.c
arch/csky/include/asm/mmu_context.h
arch/parisc/Makefile
arch/powerpc/net/bpf_jit_comp64.c
arch/sparc/kernel/iommu.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/x86/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/entry/entry_64.S
arch/x86/entry/vdso/Makefile
arch/x86/include/asm/bootparam_utils.h
arch/x86/kernel/kprobes/opt.c
arch/x86/platform/efi/early_printk.c
block/bfq-iosched.c
block/bfq-iosched.h
block/bfq-wf2q.c
block/blk-mq.c
crypto/Kconfig
crypto/cbc.c
crypto/cfb.c
crypto/pcbc.c
drivers/acpi/nfit/core.c
drivers/ata/libata-core.c
drivers/clk/mmp/clk.c
drivers/clk/mvebu/cp110-system-controller.c
drivers/clk/qcom/common.c
drivers/clk/zynqmp/clkc.c
drivers/dma/dw/core.c
drivers/dma/imx-sdma.c
drivers/dma/ti/cppi41.c
drivers/gnss/sirf.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/ast/ast_fb.c
drivers/gpu/drm/bridge/ti-sn65dsi86.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_lease.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi.h
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_debugfs.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_rd.c
drivers/gpu/drm/omapdrm/displays/panel-dpi.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/omapdrm/dss/omapdss.h
drivers/gpu/drm/omapdrm/omap_encoder.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/hid/hid-hyperv.c
drivers/hv/channel_mgmt.c
drivers/hv/connection.c
drivers/hv/hyperv_vmbus.h
drivers/i2c/busses/i2c-axxia.c
drivers/i2c/busses/i2c-nvidia-gpu.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-scmi.c
drivers/i2c/busses/i2c-uniphier-f.c
drivers/i2c/busses/i2c-uniphier.c
drivers/ide/ide-proc.c
drivers/ide/pmac.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/cros_ec_keyb.c
drivers/input/keyboard/matrix_keypad.c
drivers/input/keyboard/omap4-keypad.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/synaptics.c
drivers/input/serio/hyperv-keyboard.c
drivers/input/touchscreen/migor_ts.c
drivers/input/touchscreen/st1232.c
drivers/media/dvb-frontends/dvb-pll.c
drivers/media/media-request.c
drivers/media/platform/vicodec/vicodec-core.c
drivers/media/usb/gspca/gspca.c
drivers/mfd/cros_ec_dev.c
drivers/net/bonding/bond_3ad.c
drivers/net/dsa/mv88e6060.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
drivers/net/ethernet/freescale/fman/fman.c
drivers/net/ethernet/ibm/emac/emac.h
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx4/Kconfig
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/socionext/sni_ave.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/macvlan.c
drivers/net/phy/phy_device.c
drivers/net/phy/sfp-bus.c
drivers/net/tun.c
drivers/net/virtio_net.c
drivers/net/wireless/mac80211_hwsim.c
drivers/nvdimm/nd-core.h
drivers/nvdimm/pfn_devs.c
drivers/nvdimm/region_devs.c
drivers/nvme/host/core.c
drivers/nvme/target/rdma.c
drivers/pci/pcie/aspm.c
drivers/s390/virtio/virtio_ccw.c
drivers/sbus/char/display7seg.c
drivers/sbus/char/envctrl.c
drivers/scsi/libiscsi.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/storvsc_drv.c
drivers/scsi/vmw_pvscsi.c
drivers/staging/media/sunxi/cedrus/TODO
drivers/staging/rtl8712/mlme_linux.c
drivers/staging/rtl8712/rtl871x_mlme.c
drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
drivers/thermal/armada_thermal.c
drivers/thermal/broadcom/bcm2835_thermal.c
drivers/thermal/broadcom/brcmstb_thermal.c
drivers/tty/serial/8250/8250_mtk.c
drivers/tty/serial/kgdboc.c
drivers/tty/serial/suncore.c
drivers/tty/tty_io.c
drivers/tty/tty_port.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/core/usb.c
drivers/usb/host/hwa-hc.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/appledisplay.c
drivers/usb/serial/console.c
drivers/vhost/vhost.c
drivers/vhost/vsock.c
fs/btrfs/tree-checker.c
fs/cifs/Kconfig
fs/cifs/dir.c
fs/cifs/file.c
fs/dax.c
fs/exec.c
fs/iomap.c
fs/nfs/direct.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/read_write.c
fs/splice.c
fs/xfs/libxfs/xfs_btree.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_qm_bhv.c
include/linux/dax.h
include/linux/filter.h
include/linux/gfp.h
include/linux/hyperv.h
include/linux/mempolicy.h
include/linux/sfp.h
include/linux/sunrpc/xdr.h
include/linux/tty.h
include/linux/usb.h
include/media/media-request.h
include/net/neighbour.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/sound/pcm_params.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/bpf.h
kernel/bpf/btf.c
kernel/bpf/verifier.c
kernel/events/uprobes.c
kernel/stackleak.c
mm/huge_memory.c
mm/memory-failure.c
mm/mempolicy.c
mm/shmem.c
net/bpf/test_run.c
net/core/dev.c
net/core/filter.c
net/core/rtnetlink.c
net/dsa/master.c
net/dsa/slave.c
net/ipv4/ip_fragment.c
net/ipv4/ip_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv6/ip6_input.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/reassembly.c
net/ipv6/seg6_iptunnel.c
net/mac80211/cfg.c
net/mac80211/iface.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/status.c
net/mac80211/tx.c
net/openvswitch/conntrack.c
net/sched/act_police.c
net/sched/cls_flower.c
net/sched/sch_netem.c
net/sctp/associola.c
net/sctp/chunk.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/clnt.c
net/sunrpc/xprt.c
net/sunrpc/xprtsock.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/sme.c
net/wireless/util.c
net/x25/af_x25.c
net/x25/x25_in.c
scripts/gcc-plugins/stackleak_plugin.c
sound/core/pcm_native.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/usb/card.c
sound/usb/quirks.c
tools/bpf/bpftool/btf_dumper.c
tools/include/uapi/linux/bpf.h
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/bpf/bpf_helpers.h
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/test_sk_lookup_kern.c
tools/testing/selftests/bpf/test_verifier.c

index f240221e071ef7b76f6f3d4cdd584a34a4e6f212..985d84c585c669084d37fb3df5391a93b4094816 100644 (file)
@@ -1,4 +1,4 @@
-What:          /sys/class/net/<iface>/tagging
+What:          /sys/class/net/<iface>/dsa/tagging
 Date:          August 2018
 KernelVersion: 4.20
 Contact:       netdev@vger.kernel.org
index 2ec489eebe723afb0f6cf1700d7869e9d84f0ac6..b646bbcf7f92489063b9f44acdb449ef8c84b416 100644 (file)
@@ -168,3 +168,19 @@ a shared clock is forbidden.
 
 Configuration of common clocks, which affect multiple consumer devices can
 be similarly specified in the clock provider node.
+
+==Protected clocks==
+
+Some platforms or firmwares may not fully expose all the clocks to the OS, such
+as in situations where those clks are used by drivers running in ARM secure
+execution levels. Such a configuration can be specified in device tree with the
+protected-clocks property in the form of a clock specifier list. This property should
+only be specified in the node that is providing the clocks being protected:
+
+   clock-controller@a000f000 {
+        compatible = "vendor,clk95;
+        reg = <0xa000f000 0x1000>
+        #clocks-cells = <1>;
+        ...
+        protected-clocks = <UART3_CLK>, <SPI5_CLK>;
+   };
index 2bb2626fdb78b4521fc06a08fec7107ec85a71a3..1ca6cc5ebf8ed83a2d9a5d5773ea9ff820d93b34 100644 (file)
@@ -12,7 +12,7 @@ The /chosen node should contain a 'linux,sysrq-reset-seq' child node to define
 a set of keys.
 
 Required property:
-sysrq-reset-seq: array of Linux keycodes, one keycode per cell.
+keyset: array of Linux keycodes, one keycode per cell.
 
 Optional property:
 timeout-ms: duration keys must be pressed together in milliseconds before
diff --git a/Documentation/devicetree/bindings/media/rockchip-vpu.txt b/Documentation/devicetree/bindings/media/rockchip-vpu.txt
deleted file mode 100644 (file)
index 35dc464..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-device-tree bindings for rockchip VPU codec
-
-Rockchip (Video Processing Unit) present in various Rockchip platforms,
-such as RK3288 and RK3399.
-
-Required properties:
-- compatible: value should be one of the following
-               "rockchip,rk3288-vpu";
-               "rockchip,rk3399-vpu";
-- interrupts: encoding and decoding interrupt specifiers
-- interrupt-names: should be "vepu" and "vdpu"
-- clocks: phandle to VPU aclk, hclk clocks
-- clock-names: should be "aclk" and "hclk"
-- power-domains: phandle to power domain node
-- iommus: phandle to a iommu node
-
-Example:
-SoC-specific DT entry:
-       vpu: video-codec@ff9a0000 {
-               compatible = "rockchip,rk3288-vpu";
-               reg = <0x0 0xff9a0000 0x0 0x800>;
-               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
-               interrupt-names = "vepu", "vdpu";
-               clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
-               clock-names = "aclk", "hclk";
-               power-domains = <&power RK3288_PD_VIDEO>;
-               iommus = <&vpu_mmu>;
-       };
index 0f8b31874002c79777e58a3e33892742ab10e495..de131f00c24966e29dde6e61b489cafef39c9b9e 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _media_ioc_request_alloc:
 
index 6dd2d7fea7144502bb271ab2e156d028332cafcb..5d2604345e191c9a34fe63d3b57a2ed719e57fa0 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _media_request_ioc_queue:
 
index febe888494c8dfb6e46ebb61ab39141155f3a343..ec61960c81ce9b4eb47381eef9b5dfb57df1e7fb 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _media_request_ioc_reinit:
 
index 5f4a23029c487ca110bb81dcae3025d99f1ae285..945113dcb2185762e4d74d20712868755dbee0ca 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _media-request-api:
 
index 098d7f2b9548231d3cd058847c05b42c7aa34642..dcf3f35bcf176d8a0d1a33e3b0d4a42c8832c72a 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _request-func-close:
 
index ff7b072a69991970aba4e3ea35c897bc10f0f4b4..11a22f8878439cb1fbae0837879b7e1b3c64d5da 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _request-func-ioctl:
 
index 85191254f381abea160fceef5c7e2fbbbefb9c7b..2609fd54d519cb2379f6ec19033ea87ac7c3469d 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _request-func-poll:
 
index 6682420421c15da8e7a23a2a308d06a673b98b58..8119141a926f3a577b0a351caa71bd39a894f65f 100644 (file)
@@ -1472,6 +1472,7 @@ F:        drivers/clk/sirf/
 F:     drivers/clocksource/timer-prima2.c
 F:     drivers/clocksource/timer-atlas7.c
 N:     [^a-z]sirf
+X:     drivers/gnss
 
 ARM/EBSA110 MACHINE SUPPORT
 M:     Russell King <linux@armlinux.org.uk>
@@ -3271,11 +3272,16 @@ S:      Maintained
 F:     sound/pci/oxygen/
 
 C-SKY ARCHITECTURE
-M:     Guo Ren <ren_guo@c-sky.com>
+M:     Guo Ren <guoren@kernel.org>
 T:     git https://github.com/c-sky/csky-linux.git
 S:     Supported
 F:     arch/csky/
 F:     Documentation/devicetree/bindings/csky/
+F:     drivers/irqchip/irq-csky-*
+F:     Documentation/devicetree/bindings/interrupt-controller/csky,*
+F:     drivers/clocksource/timer-gx6605s.c
+F:     drivers/clocksource/timer-mp-csky.c
+F:     Documentation/devicetree/bindings/timer/csky,*
 K:     csky
 N:     csky
 
@@ -6316,6 +6322,7 @@ F:        include/uapi/linux/gigaset_dev.h
 
 GNSS SUBSYSTEM
 M:     Johan Hovold <johan@kernel.org>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/johan/gnss.git
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-class-gnss
 F:     Documentation/devicetree/bindings/gnss/
@@ -13883,6 +13890,13 @@ F:     drivers/md/raid*
 F:     include/linux/raid/
 F:     include/uapi/linux/raid/
 
+SOCIONEXT (SNI) AVE NETWORK DRIVER
+M:     Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ethernet/socionext/sni_ave.c
+F:     Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
+
 SOCIONEXT (SNI) NETSEC NETWORK DRIVER
 M:     Jassi Brar <jaswinder.singh@linaro.org>
 L:     netdev@vger.kernel.org
index e9fd22c8445ecf0c62da496324c8b9c61965078a..f2c3423c3062f2b704c239621d2093cc45280060 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 20
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
index c9e2a1323536313c8ee0e10906db67902c846674..6dd7835573308602f6be4c3e71d654b19390a0f1 100644 (file)
@@ -109,7 +109,7 @@ endmenu
 
 choice
        prompt "ARC Instruction Set"
-       default ISA_ARCOMPACT
+       default ISA_ARCV2
 
 config ISA_ARCOMPACT
        bool "ARCompact ISA"
@@ -176,13 +176,11 @@ endchoice
 
 config CPU_BIG_ENDIAN
        bool "Enable Big Endian Mode"
-       default n
        help
          Build kernel for Big Endian Mode of ARC CPU
 
 config SMP
        bool "Symmetric Multi-Processing"
-       default n
        select ARC_MCIP if ISA_ARCV2
        help
          This enables support for systems with more than one CPU.
@@ -254,7 +252,6 @@ config ARC_CACHE_PAGES
 config ARC_CACHE_VIPT_ALIASING
        bool "Support VIPT Aliasing D$"
        depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
-       default n
 
 endif  #ARC_CACHE
 
@@ -262,7 +259,6 @@ config ARC_HAS_ICCM
        bool "Use ICCM"
        help
          Single Cycle RAMS to store Fast Path Code
-       default n
 
 config ARC_ICCM_SZ
        int "ICCM Size in KB"
@@ -273,7 +269,6 @@ config ARC_HAS_DCCM
        bool "Use DCCM"
        help
          Single Cycle RAMS to store Fast Path Data
-       default n
 
 config ARC_DCCM_SZ
        int "DCCM Size in KB"
@@ -366,13 +361,11 @@ if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
        bool "Setup Timer IRQ as high Priority"
-       default n
        # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
        depends on !SMP
 
 config ARC_FPU_SAVE_RESTORE
        bool "Enable FPU state persistence across context switch"
-       default n
        help
          Double Precision Floating Point unit had dedicated regs which
          need to be saved/restored across context-switch.
@@ -453,7 +446,6 @@ config HIGHMEM
 
 config ARC_HAS_PAE40
        bool "Support for the 40-bit Physical Address Extension"
-       default n
        depends on ISA_ARCV2
        select HIGHMEM
        select PHYS_ADDR_T_64BIT
@@ -496,7 +488,6 @@ config HZ
 
 config ARC_METAWARE_HLINK
        bool "Support for Metaware debugger assisted Host access"
-       default n
        help
          This options allows a Linux userland apps to directly access
          host file system (open/creat/read/write etc) with help from
@@ -524,13 +515,11 @@ config ARC_DW2_UNWIND
 
 config ARC_DBG_TLB_PARANOIA
        bool "Paranoia Checks in Low Level TLB Handlers"
-       default n
 
 endif
 
 config ARC_UBOOT_SUPPORT
        bool "Support uboot arg Handling"
-       default n
        help
          ARC Linux by default checks for uboot provided args as pointers to
          external cmdline or DTB. This however breaks in absence of uboot,
index c64c505d966c7a737b0db3c0590fb1063b549f69..df00578c279d4bc0ee03d71089769383440e7cf6 100644 (file)
@@ -6,7 +6,7 @@
 # published by the Free Software Foundation.
 #
 
-KBUILD_DEFCONFIG := nsim_700_defconfig
+KBUILD_DEFCONFIG := nsim_hs_defconfig
 
 cflags-y       += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
index ef149f59929ae394a30695fa0940060acef15817..43f17b51ee89cca00a0b2eebb7ed045d49de03a0 100644 (file)
                        bus-width = <4>;
                        dma-coherent;
                };
+
+               gpio: gpio@3000 {
+                       compatible = "snps,dw-apb-gpio";
+                       reg = <0x3000 0x20>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       gpio_port_a: gpio-controller@0 {
+                               compatible = "snps,dw-apb-gpio-port";
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               snps,nr-gpios = <24>;
+                               reg = <0>;
+                       };
+               };
        };
 
        memory@80000000 {
index 41bc08be6a3b4202bbe27f74fdc8e01a56e4c3cd..020d4493edfd0530423659a4f401258ecf7bc0be 100644 (file)
@@ -14,6 +14,7 @@ CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
@@ -95,6 +96,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index 1e1c4a8011b523dc88b89fb39e90dfeab5a3154b..666314fffc601be8c455664152f9111814cdd446 100644 (file)
@@ -94,6 +94,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index 6b0c0cfd5c304fd6ae58fc3fd92d9cb53e086d2d..429832b8560b878b65be199f69eb740a6b362054 100644 (file)
@@ -97,6 +97,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index 1dec2b4bc5e6ea70696249d6815dfe69e73eb21c..87b23b7fb781470b2897e66ff5c0ad64fc2734c4 100644 (file)
@@ -45,6 +45,9 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_DWAPB=y
 # CONFIG_HWMON is not set
 CONFIG_DRM=y
 # CONFIG_DRM_FBDEV_EMULATION is not set
@@ -65,6 +68,7 @@ CONFIG_EXT3_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index 31ba224bbfb474985b49930dea193c6bbb1a5f37..6e84060e7c90a2cbba081a46f87ab607aee1d22e 100644 (file)
@@ -15,6 +15,7 @@ CONFIG_SYSCTL_SYSCALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
@@ -73,6 +74,7 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_ROOT_NFS=y
 CONFIG_DEBUG_INFO=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index 8e0b8b134cd9ed89652b88aea3bade03881e95c9..219c2a65294b82176400c9833e3606cd79f87a1c 100644 (file)
@@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_LBDAF is not set
index f14eeff7d3084948c16d8905677ec25a629ccdcc..35dfc6491a09486ef0176b8ced1c080efa870ec0 100644 (file)
@@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_LBDAF is not set
@@ -66,5 +67,6 @@ CONFIG_EXT2_FS_XATTR=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
index 025298a483056b1ca782e83056f8b0a44d193809..1638e5bc967246686735bd6629ce9d7087caffd4 100644 (file)
@@ -65,5 +65,6 @@ CONFIG_EXT2_FS_XATTR=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
index df7b77b13b823dc0c8d41f543181b12a20212cbd..11cfbdb0f441567ee93d6283e9c8265454c818cf 100644 (file)
@@ -76,6 +76,7 @@ CONFIG_EXT2_FS_XATTR=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FTRACE=y
index a7f65313f84a56a3ddc0307c669bbfbcf4c0386f..e71ade3cf9c809398a8c51bffd5bdff3f39c465a 100644 (file)
@@ -19,6 +19,7 @@ CONFIG_KALLSYMS_ALL=y
 # CONFIG_AIO is not set
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
index db47c3541f15931b2927fd1bd27749f2568e9761..1e59a2e9c602fa2736cfc0d6fdd439b07a11105b 100644 (file)
@@ -85,6 +85,7 @@ CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index a8ac5e917d9a5895a4bc3ba30be01fd222ecec71..b5c3f6c54b032d2a84510737272cacbe1ec89b1c 100644 (file)
@@ -90,6 +90,7 @@ CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index ff7d3232764a29a41503a213d3bd385e232acf42..f393b663413e49ab38bf0d4070cb7ea9f39bcfd1 100644 (file)
@@ -113,7 +113,9 @@ extern unsigned long perip_base, perip_end;
 
 /* IO coherency related Auxiliary registers */
 #define ARC_REG_IO_COH_ENABLE  0x500
+#define ARC_IO_COH_ENABLE_BIT  BIT(0)
 #define ARC_REG_IO_COH_PARTIAL 0x501
+#define ARC_IO_COH_PARTIAL_BIT BIT(0)
 #define ARC_REG_IO_COH_AP0_BASE        0x508
 #define ARC_REG_IO_COH_AP0_SIZE        0x509
 
index c22b181e8206f3162c4e0e19214f8b303f13c576..2f39d9b3886e4fc638dfa6a8a9b2fc45453d6c69 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <asm/page.h>
+#include <asm/unaligned.h>
 
 #ifdef CONFIG_ISA_ARCV2
 #include <asm/barrier.h>
@@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
        return w;
 }
 
+/*
+ * {read,write}s{b,w,l}() repeatedly access the same IO address in
+ * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
+ * @count times
+ */
+#define __raw_readsx(t,f) \
+static inline void __raw_reads##f(const volatile void __iomem *addr,   \
+                                 void *ptr, unsigned int count)        \
+{                                                                      \
+       bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;        \
+       u##t *buf = ptr;                                                \
+                                                                       \
+       if (!count)                                                     \
+               return;                                                 \
+                                                                       \
+       /* Some ARC CPU's don't support unaligned accesses */           \
+       if (is_aligned) {                                               \
+               do {                                                    \
+                       u##t x = __raw_read##f(addr);                   \
+                       *buf++ = x;                                     \
+               } while (--count);                                      \
+       } else {                                                        \
+               do {                                                    \
+                       u##t x = __raw_read##f(addr);                   \
+                       put_unaligned(x, buf++);                        \
+               } while (--count);                                      \
+       }                                                               \
+}
+
+#define __raw_readsb __raw_readsb
+__raw_readsx(8, b)
+#define __raw_readsw __raw_readsw
+__raw_readsx(16, w)
+#define __raw_readsl __raw_readsl
+__raw_readsx(32, l)
+
 #define __raw_writeb __raw_writeb
 static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
 {
@@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 
 }
 
+#define __raw_writesx(t,f)                                             \
+static inline void __raw_writes##f(volatile void __iomem *addr,        \
+                                  const void *ptr, unsigned int count) \
+{                                                                      \
+       bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;        \
+       const u##t *buf = ptr;                                          \
+                                                                       \
+       if (!count)                                                     \
+               return;                                                 \
+                                                                       \
+       /* Some ARC CPU's don't support unaligned accesses */           \
+       if (is_aligned) {                                               \
+               do {                                                    \
+                       __raw_write##f(*buf++, addr);                   \
+               } while (--count);                                      \
+       } else {                                                        \
+               do {                                                    \
+                       __raw_write##f(get_unaligned(buf++), addr);     \
+               } while (--count);                                      \
+       }                                                               \
+}
+
+#define __raw_writesb __raw_writesb
+__raw_writesx(8, b)
+#define __raw_writesw __raw_writesw
+__raw_writesx(16, w)
+#define __raw_writesl __raw_writesl
+__raw_writesx(32, l)
+
 /*
  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
  * Based on ARM model for the typical use case
@@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 #define readb(c)               ({ u8  __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)               ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
 #define readl(c)               ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readsb(p,d,l)          ({ __raw_readsb(p,d,l); __iormb(); })
+#define readsw(p,d,l)          ({ __raw_readsw(p,d,l); __iormb(); })
+#define readsl(p,d,l)          ({ __raw_readsl(p,d,l); __iormb(); })
 
 #define writeb(v,c)            ({ __iowmb(); writeb_relaxed(v,c); })
 #define writew(v,c)            ({ __iowmb(); writew_relaxed(v,c); })
 #define writel(v,c)            ({ __iowmb(); writel_relaxed(v,c); })
+#define writesb(p,d,l)         ({ __iowmb(); __raw_writesb(p,d,l); })
+#define writesw(p,d,l)         ({ __iowmb(); __raw_writesw(p,d,l); })
+#define writesl(p,d,l)         ({ __iowmb(); __raw_writesl(p,d,l); })
 
 /*
  * Relaxed API for drivers which can handle barrier ordering themselves
index b2cae79a25d716165eaf65060cb8ed0be11f3b6c..eea8c5ce633504ec0e7a8f4d6a49ce6042fef4a8 100644 (file)
@@ -243,7 +243,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
        struct bcr_identity *core = &cpu->core;
-       int i, n = 0;
+       int i, n = 0, ua = 0;
 
        FIX_PTR(cpu);
 
@@ -263,10 +263,13 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                       IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
                       IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
 
-       n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
+#ifdef __ARC_UNALIGNED__
+       ua = 1;
+#endif
+       n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
                           IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
                           IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
-                          IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
+                          IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua));
 
        if (i)
                n += scnprintf(buf + n, len - n, "\n\t\t: ");
index f2701c13a66b209571ff89b71ac6c93cabb9835d..cf9619d4efb4f86d68cb2417558fe3327c55c408 100644 (file)
@@ -1144,6 +1144,20 @@ noinline void __init arc_ioc_setup(void)
 {
        unsigned int ioc_base, mem_sz;
 
+       /*
+        * If IOC was already enabled (due to bootloader) it technically needs to
+        * be reconfigured with aperture base,size corresponding to Linux memory map
+        * which will certainly be different than uboot's. But disabling and
+        * reenabling IOC when DMA might be potentially active is tricky business.
+        * To avoid random memory issues later, just panic here and ask user to
+        * upgrade bootloader to one which doesn't enable IOC
+        */
+       if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
+               panic("IOC already enabled, please upgrade bootloader!\n");
+
+       if (!ioc_enable)
+               return;
+
        /*
         * As for today we don't support both IOC and ZONE_HIGHMEM enabled
         * simultaneously. This happens because as of today IOC aperture covers
@@ -1187,8 +1201,8 @@ noinline void __init arc_ioc_setup(void)
                panic("IOC Aperture start must be aligned to the size of the aperture");
 
        write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
-       write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
-       write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+       write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
+       write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);
 
        /* Re-enable L1 dcache */
        __dc_enable();
@@ -1265,7 +1279,7 @@ void __init arc_cache_init_master(void)
        if (is_isa_arcv2() && l2_line_sz && !slc_enable)
                arc_slc_disable();
 
-       if (is_isa_arcv2() && ioc_enable)
+       if (is_isa_arcv2() && ioc_exists)
                arc_ioc_setup();
 
        if (is_isa_arcv2() && l2_line_sz && slc_enable) {
index c9da6102eb4fba4eb7f79224f826517d8f20e5c1..e2d9fc3fea01e7a93b4e7bb0b16c7b0874d33e5f 100644 (file)
@@ -66,7 +66,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
        struct vm_area_struct *vma = NULL;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
-       int si_code;
+       int si_code = 0;
        int ret;
        vm_fault_t fault;
        int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
index 215df435bfb9881f347d59f90aa0a98765d304b4..2149b47a0c5ace25958929ca44692df779950fbf 100644 (file)
@@ -360,14 +360,16 @@ v7_dma_inv_range:
        ALT_UP(W(nop))
 #endif
        mcrne   p15, 0, r0, c7, c14, 1          @ clean & invalidate D / U line
+       addne   r0, r0, r2
 
        tst     r1, r3
        bic     r1, r1, r3
        mcrne   p15, 0, r1, c7, c14, 1          @ clean & invalidate D / U line
-1:
-       mcr     p15, 0, r0, c7, c6, 1           @ invalidate D / U line
-       add     r0, r0, r2
        cmp     r0, r1
+1:
+       mcrlo   p15, 0, r0, c7, c6, 1           @ invalidate D / U line
+       addlo   r0, r0, r2
+       cmplo   r0, r1
        blo     1b
        dsb     st
        ret     lr
index 788486e830d3e644bbf4c608af6c75e64b5bb84a..32aa2a2aa260cb59eb10557f2c4159588e350b3c 100644 (file)
 /*
  * dcimvac: Invalidate data cache line by MVA to PoC
  */
-.macro dcimvac, rt, tmp
-       v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
+.irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+.macro dcimvac\c, rt, tmp
+       v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
 .endm
+.endr
 
 /*
  * dccmvau: Clean data cache line by MVA to PoU
@@ -369,14 +371,16 @@ v7m_dma_inv_range:
        tst     r0, r3
        bic     r0, r0, r3
        dccimvacne r0, r3
+       addne   r0, r0, r2
        subne   r3, r2, #1      @ restore r3, corrupted by v7m's dccimvac
        tst     r1, r3
        bic     r1, r1, r3
        dccimvacne r1, r3
-1:
-       dcimvac r0, r3
-       add     r0, r0, r2
        cmp     r0, r1
+1:
+       dcimvaclo r0, r3
+       addlo   r0, r0, r2
+       cmplo   r0, r1
        blo     1b
        dsb     st
        ret     lr
index 661fe48ab78da175732920d87046ec7460bc5d8f..78de138aa66dc48ccdfc223dc75dd017961feb28 100644 (file)
@@ -829,7 +829,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
                 unsigned long attrs)
 {
-       int ret;
+       int ret = -ENXIO;
        unsigned long nr_vma_pages = vma_pages(vma);
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = dma_to_pfn(dev, dma_addr);
index 81d0efb055c66080e976f9504c69866f7699b1a6..19516fbc2c55a65c761094bf3a67c8cd69568183 100644 (file)
        .endm
 
 .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
+/*
+ * If we are building for big.Little with branch predictor hardening,
+ * we need the processor function tables to remain available after boot.
+ */
+#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+       .section ".rodata"
+#endif
        .type   \name\()_processor_functions, #object
        .align 2
 ENTRY(\name\()_processor_functions)
@@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions)
        .endif
 
        .size   \name\()_processor_functions, . - \name\()_processor_functions
+#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+       .previous
+#endif
 .endm
 
 .macro define_cache_functions name:req
index b2aa9b32bff2b5e9d2e6d102a4cd58f6cf8c5676..2c118a6ab358736e8227214b081fce343b48b29f 100644 (file)
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
        }
 
        /* Copy arch-dep-instance from template. */
-       memcpy(code, &optprobe_template_entry,
+       memcpy(code, (unsigned char *)optprobe_template_entry,
                        TMPL_END_IDX * sizeof(kprobe_opcode_t));
 
        /* Adjust buffer according to instruction. */
index d667eee4e6d03311b49185a1730e5795608a1a54..b3def035817758fb4baf710684818d3fbeec0f42 100644 (file)
        };
 };
 
+&gcc {
+       protected-clocks = <GCC_QSPI_CORE_CLK>,
+                          <GCC_QSPI_CORE_CLK_SRC>,
+                          <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+};
+
 &i2c10 {
        status = "okay";
        clock-frequency = <400000>;
index 6b2686d54411fdc0a92e3d4cda2dd38f5b21d40b..29cdc99688f335075dcfa71ed0b387d9ca548538 100644 (file)
@@ -214,7 +214,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
        }
 
        memcpy((void *)dst, src_start, length);
-       flush_icache_range(dst, dst + length);
+       __flush_icache_range(dst, dst + length);
 
        pgdp = pgd_offset_raw(allocator(mask), dst_addr);
        if (pgd_none(READ_ONCE(*pgdp))) {
index c410aa4fff1a19efcf5234835409762dd72163b6..b2905c0485a72a177c00c69fceea5be092624cdb 100644 (file)
@@ -16,7 +16,7 @@
 
 static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
 {
-       pgd &= ~(1<<31);
+       pgd -= PAGE_OFFSET;
        pgd += PHYS_OFFSET;
        pgd |= 1;
        setup_pgd(pgd, kernel);
@@ -29,7 +29,7 @@ static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
 
 static inline unsigned long tlb_get_pgd(void)
 {
-       return ((get_pgd()|(1<<31)) - PHYS_OFFSET) & ~1;
+       return ((get_pgd() - PHYS_OFFSET) & ~1) + PAGE_OFFSET;
 }
 
 #define cpu_context(cpu, mm)   ((mm)->context.asid[cpu])
index d047a09d660f003de3b3059984de4d9974ae646d..1085385e1f06a433ccf840fae55b8f76f613e97a 100644 (file)
@@ -71,6 +71,13 @@ ifdef CONFIG_MLONGCALLS
 KBUILD_CFLAGS_KERNEL += -mlong-calls
 endif
 
+# Without this, "ld -r" results in .text sections that are too big (> 0x40000)
+# for branches to reach stubs. And multiple .text sections trigger a warning
+# when creating the sysfs module information section.
+ifndef CONFIG_64BIT
+KBUILD_CFLAGS_MODULE += -ffunction-sections
+endif
+
 # select which processor to optimise for
 cflags-$(CONFIG_PA7000)                += -march=1.1 -mschedule=7100
 cflags-$(CONFIG_PA7200)                += -march=1.1 -mschedule=7200
index 17482f5de3e262f53d67b4199490660362b394a0..9393e231cbc2813e1f24c4484c62bb87fbf24a53 100644 (file)
@@ -891,6 +891,55 @@ cond_branch:
        return 0;
 }
 
+/* Fix the branch target addresses for subprog calls */
+static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
+                                      struct codegen_context *ctx, u32 *addrs)
+{
+       const struct bpf_insn *insn = fp->insnsi;
+       bool func_addr_fixed;
+       u64 func_addr;
+       u32 tmp_idx;
+       int i, ret;
+
+       for (i = 0; i < fp->len; i++) {
+               /*
+                * During the extra pass, only the branch target addresses for
+                * the subprog calls need to be fixed. All other instructions
+                * can left untouched.
+                *
+                * The JITed image length does not change because we already
+                * ensure that the JITed instruction sequence for these calls
+                * are of fixed length by padding them with NOPs.
+                */
+               if (insn[i].code == (BPF_JMP | BPF_CALL) &&
+                   insn[i].src_reg == BPF_PSEUDO_CALL) {
+                       ret = bpf_jit_get_func_addr(fp, &insn[i], true,
+                                                   &func_addr,
+                                                   &func_addr_fixed);
+                       if (ret < 0)
+                               return ret;
+
+                       /*
+                        * Save ctx->idx as this would currently point to the
+                        * end of the JITed image and set it to the offset of
+                        * the instruction sequence corresponding to the
+                        * subprog call temporarily.
+                        */
+                       tmp_idx = ctx->idx;
+                       ctx->idx = addrs[i] / 4;
+                       bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+
+                       /*
+                        * Restore ctx->idx here. This is safe as the length
+                        * of the JITed sequence remains unchanged.
+                        */
+                       ctx->idx = tmp_idx;
+               }
+       }
+
+       return 0;
+}
+
 struct powerpc64_jit_data {
        struct bpf_binary_header *header;
        u32 *addrs;
@@ -989,6 +1038,22 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 skip_init_ctx:
        code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
 
+       if (extra_pass) {
+               /*
+                * Do not touch the prologue and epilogue as they will remain
+                * unchanged. Only fix the branch target address for subprog
+                * calls in the body.
+                *
+                * This does not change the offsets and lengths of the subprog
+                * call instruction sequences and hence, the size of the JITed
+                * image as well.
+                */
+               bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
+
+               /* There is no need to perform the usual passes. */
+               goto skip_codegen_passes;
+       }
+
        /* Code generation passes 1-2 */
        for (pass = 1; pass < 3; pass++) {
                /* Now build the prologue, body code & epilogue for real. */
@@ -1002,6 +1067,7 @@ skip_init_ctx:
                                proglen - (cgctx.idx * 4), cgctx.seen);
        }
 
+skip_codegen_passes:
        if (bpf_jit_enable > 1)
                /*
                 * Note that we output the base address of the code_base
index 40d008b0bd3e98e43d07469dbf73d4c358507b29..05eb016fc41be2856632ddebce30599567d365ae 100644 (file)
@@ -108,10 +108,9 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
        /* Allocate and initialize the free area map.  */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
-       iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
+       iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
        if (!iommu->tbl.map)
                return -ENOMEM;
-       memset(iommu->tbl.map, 0, sz);
 
        iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
                            (tlb_type != hypervisor ? iommu_flushall : NULL),
index 4c5b3fcbed94c376a2a44bb0c4fdaab139b3a169..e800ce13cc6e5bb2646e10ad0cdd0c71e9a80d44 100644 (file)
@@ -683,6 +683,7 @@ void do_signal32(struct pt_regs * regs)
                                regs->tpc -= 4;
                                regs->tnpc -= 4;
                                pt_regs_clear_syscall(regs);
+                               /* fall through */
                        case ERESTART_RESTARTBLOCK:
                                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                                regs->tpc -= 4;
index 5665261cee37f2330ab7dbd08ecce91593301a64..83953780ca016c4944e2284014c50eb661357aa4 100644 (file)
@@ -508,6 +508,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
                                regs->pc -= 4;
                                regs->npc -= 4;
                                pt_regs_clear_syscall(regs);
+                               /* fall through */
                        case ERESTART_RESTARTBLOCK:
                                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                                regs->pc -= 4;
index e9de1803a22e004adbb3d6b541ad552f71626e7d..ca70787efd8e05de3d4ec8a3f9a8040bfb4c4844 100644 (file)
@@ -533,6 +533,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
                                regs->tpc -= 4;
                                regs->tnpc -= 4;
                                pt_regs_clear_syscall(regs);
+                               /* fall through */
                        case ERESTART_RESTARTBLOCK:
                                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                                regs->tpc -= 4;
index f5d7f4134524965a1233618c1404f4849d7c51c9..75ef499a66e2b81c82fb6abb9bb4bd9a64521e73 100644 (file)
@@ -220,9 +220,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 
 # Avoid indirect branches in kernel to deal with Spectre
 ifdef CONFIG_RETPOLINE
-ifeq ($(RETPOLINE_CFLAGS),)
-  $(error You are building kernel with non-retpoline compiler, please update your compiler.)
-endif
   KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
 endif
 
@@ -307,6 +304,13 @@ ifndef CC_HAVE_ASM_GOTO
        @echo Compiler lacks asm-goto support.
        @exit 1
 endif
+ifdef CONFIG_RETPOLINE
+ifeq ($(RETPOLINE_CFLAGS),)
+       @echo "You are building kernel with non-retpoline compiler." >&2
+       @echo "Please update your compiler." >&2
+       @false
+endif
+endif
 
 archclean:
        $(Q)rm -rf $(objtree)/arch/i386
index 8b4c5e0011572f7a0a7636d769863bfbe3beb620..544ac4fafd112a8b32802e0c4c5a4392b71bd67c 100644 (file)
@@ -1,3 +1,4 @@
+
 /* -----------------------------------------------------------------------
  *
  *   Copyright 2011 Intel Corporation; author Matt Fleming
@@ -634,37 +635,54 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
        return status;
 }
 
+static efi_status_t allocate_e820(struct boot_params *params,
+                                 struct setup_data **e820ext,
+                                 u32 *e820ext_size)
+{
+       unsigned long map_size, desc_size, buff_size;
+       struct efi_boot_memmap boot_map;
+       efi_memory_desc_t *map;
+       efi_status_t status;
+       __u32 nr_desc;
+
+       boot_map.map            = &map;
+       boot_map.map_size       = &map_size;
+       boot_map.desc_size      = &desc_size;
+       boot_map.desc_ver       = NULL;
+       boot_map.key_ptr        = NULL;
+       boot_map.buff_size      = &buff_size;
+
+       status = efi_get_memory_map(sys_table, &boot_map);
+       if (status != EFI_SUCCESS)
+               return status;
+
+       nr_desc = buff_size / desc_size;
+
+       if (nr_desc > ARRAY_SIZE(params->e820_table)) {
+               u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table);
+
+               status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
+               if (status != EFI_SUCCESS)
+                       return status;
+       }
+
+       return EFI_SUCCESS;
+}
+
 struct exit_boot_struct {
        struct boot_params      *boot_params;
        struct efi_info         *efi;
-       struct setup_data       *e820ext;
-       __u32                   e820ext_size;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
                                   struct efi_boot_memmap *map,
                                   void *priv)
 {
-       static bool first = true;
        const char *signature;
        __u32 nr_desc;
        efi_status_t status;
        struct exit_boot_struct *p = priv;
 
-       if (first) {
-               nr_desc = *map->buff_size / *map->desc_size;
-               if (nr_desc > ARRAY_SIZE(p->boot_params->e820_table)) {
-                       u32 nr_e820ext = nr_desc -
-                                       ARRAY_SIZE(p->boot_params->e820_table);
-
-                       status = alloc_e820ext(nr_e820ext, &p->e820ext,
-                                              &p->e820ext_size);
-                       if (status != EFI_SUCCESS)
-                               return status;
-               }
-               first = false;
-       }
-
        signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
                                   : EFI32_LOADER_SIGNATURE;
        memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
@@ -687,8 +705,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
 {
        unsigned long map_sz, key, desc_size, buff_size;
        efi_memory_desc_t *mem_map;
-       struct setup_data *e820ext;
-       __u32 e820ext_size;
+       struct setup_data *e820ext = NULL;
+       __u32 e820ext_size = 0;
        efi_status_t status;
        __u32 desc_version;
        struct efi_boot_memmap map;
@@ -702,8 +720,10 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
        map.buff_size           = &buff_size;
        priv.boot_params        = boot_params;
        priv.efi                = &boot_params->efi_info;
-       priv.e820ext            = NULL;
-       priv.e820ext_size       = 0;
+
+       status = allocate_e820(boot_params, &e820ext, &e820ext_size);
+       if (status != EFI_SUCCESS)
+               return status;
 
        /* Might as well exit boot services now */
        status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -711,9 +731,6 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
        if (status != EFI_SUCCESS)
                return status;
 
-       e820ext                 = priv.e820ext;
-       e820ext_size            = priv.e820ext_size;
-
        /* Historic? */
        boot_params->alt_mem_k  = 32 * 1024;
 
index ce25d84023c021ce25f041cd81497500f20c3a60..1f0efdb7b6294daba3e315be0b990ba8296b3fea 100644 (file)
@@ -566,6 +566,7 @@ ENTRY(interrupt_entry)
 
        ret
 END(interrupt_entry)
+_ASM_NOKPROBE(interrupt_entry)
 
 
 /* Interrupt entry/exit. */
@@ -766,6 +767,7 @@ native_irq_return_ldt:
        jmp     native_irq_return_iret
 #endif
 END(common_interrupt)
+_ASM_NOKPROBE(common_interrupt)
 
 /*
  * APIC interrupts.
@@ -780,6 +782,7 @@ ENTRY(\sym)
        call    \do_sym /* rdi points to pt_regs */
        jmp     ret_from_intr
 END(\sym)
+_ASM_NOKPROBE(\sym)
 .endm
 
 /* Make sure APIC interrupt handlers end up in the irqentry section: */
@@ -960,6 +963,7 @@ ENTRY(\sym)
 
        jmp     error_exit
        .endif
+_ASM_NOKPROBE(\sym)
 END(\sym)
 .endm
 
index 141d415a8c8098e9bd9747c94ee84e4de843c9f8..0624bf2266fd76d2852ce005acb2f9d67dbe6b8f 100644 (file)
@@ -47,7 +47,7 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
 CPPFLAGS_vdso.lds += -P -C
 
 VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
-                       -z max-page-size=4096 -z common-page-size=4096
+                       -z max-page-size=4096
 
 $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
        $(call if_changed,vdso)
@@ -98,7 +98,7 @@ CFLAGS_REMOVE_vvar.o = -pg
 
 CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
 VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
-                          -z max-page-size=4096 -z common-page-size=4096
+                          -z max-page-size=4096
 
 # x32-rebranded versions
 vobjx32s-y := $(vobjs-y:.o=-x32.o)
index a07ffd23e4dd67d3e182bd803eb868eaef1bcdf5..f6f6ef436599a6dbd7ecb8c86da4d69dab0a9763 100644 (file)
@@ -36,6 +36,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
         */
        if (boot_params->sentinel) {
                /* fields in boot_params are left uninitialized, clear them */
+               boot_params->acpi_rsdp_addr = 0;
                memset(&boot_params->ext_ramdisk_image, 0,
                       (char *)&boot_params->efi_info -
                        (char *)&boot_params->ext_ramdisk_image);
index 40b16b2706560e409dfe57a9817d2c1832a4a89e..6adf6e6c2933945598b32a530098b3dc1b2be690 100644 (file)
@@ -189,7 +189,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
        int len = 0, ret;
 
        while (len < RELATIVEJUMP_SIZE) {
-               ret = __copy_instruction(dest + len, src + len, real, &insn);
+               ret = __copy_instruction(dest + len, src + len, real + len, &insn);
                if (!ret || !can_boost(&insn, src + len))
                        return -EINVAL;
                len += ret;
index 7476b3b097e1e94dc0c305c6e9ef18a4e41633b3..7138bc7a265c016359e372d8274f3a4f6f068215 100644 (file)
@@ -183,7 +183,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
                        num--;
                }
 
-               if (efi_x >= si->lfb_width) {
+               if (efi_x + font->width > si->lfb_width) {
                        efi_x = 0;
                        efi_y += font->height;
                }
index 3d1f319fe9773b5ae22a417c36ac7ac5c13c1b98..cd307767a134b837d36fa1719353dea237a621f7 100644 (file)
@@ -638,7 +638,7 @@ static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
                 bfqd->queue_weights_tree.rb_node->rb_right)
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
               ) ||
-               (bfqd->num_active_groups > 0
+               (bfqd->num_groups_with_pending_reqs > 0
 #endif
               );
 }
@@ -802,7 +802,21 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
                         */
                        break;
                }
-               bfqd->num_active_groups--;
+
+               /*
+                * The decrement of num_groups_with_pending_reqs is
+                * not performed immediately upon the deactivation of
+                * entity, but it is delayed to when it also happens
+                * that the first leaf descendant bfqq of entity gets
+                * all its pending requests completed. The following
+                * instructions perform this delayed decrement, if
+                * needed. See the comments on
+                * num_groups_with_pending_reqs for details.
+                */
+               if (entity->in_groups_with_pending_reqs) {
+                       entity->in_groups_with_pending_reqs = false;
+                       bfqd->num_groups_with_pending_reqs--;
+               }
        }
 }
 
@@ -3529,27 +3543,44 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
         * fact, if there are active groups, then, for condition (i)
         * to become false, it is enough that an active group contains
         * more active processes or sub-groups than some other active
-        * group. We address this issue with the following bi-modal
-        * behavior, implemented in the function
+        * group. More precisely, for condition (i) to hold because of
+        * such a group, it is not even necessary that the group is
+        * (still) active: it is sufficient that, even if the group
+        * has become inactive, some of its descendant processes still
+        * have some request already dispatched but still waiting for
+        * completion. In fact, requests have still to be guaranteed
+        * their share of the throughput even after being
+        * dispatched. In this respect, it is easy to show that, if a
+        * group frequently becomes inactive while still having
+        * in-flight requests, and if, when this happens, the group is
+        * not considered in the calculation of whether the scenario
+        * is asymmetric, then the group may fail to be guaranteed its
+        * fair share of the throughput (basically because idling may
+        * not be performed for the descendant processes of the group,
+        * but it had to be).  We address this issue with the
+        * following bi-modal behavior, implemented in the function
         * bfq_symmetric_scenario().
         *
-        * If there are active groups, then the scenario is tagged as
+        * If there are groups with requests waiting for completion
+        * (as commented above, some of these groups may even be
+        * already inactive), then the scenario is tagged as
         * asymmetric, conservatively, without checking any of the
         * conditions (i) and (ii). So the device is idled for bfqq.
         * This behavior matches also the fact that groups are created
-        * exactly if controlling I/O (to preserve bandwidth and
-        * latency guarantees) is a primary concern.
+        * exactly if controlling I/O is a primary concern (to
+        * preserve bandwidth and latency guarantees).
         *
-        * On the opposite end, if there are no active groups, then
-        * only condition (i) is actually controlled, i.e., provided
-        * that condition (i) holds, idling is not performed,
-        * regardless of whether condition (ii) holds. In other words,
-        * only if condition (i) does not hold, then idling is
-        * allowed, and the device tends to be prevented from queueing
-        * many requests, possibly of several processes. Since there
-        * are no active groups, then, to control condition (i) it is
-        * enough to check whether all active queues have the same
-        * weight.
+        * On the opposite end, if there are no groups with requests
+        * waiting for completion, then only condition (i) is actually
+        * controlled, i.e., provided that condition (i) holds, idling
+        * is not performed, regardless of whether condition (ii)
+        * holds. In other words, only if condition (i) does not hold,
+        * then idling is allowed, and the device tends to be
+        * prevented from queueing many requests, possibly of several
+        * processes. Since there are no groups with requests waiting
+        * for completion, then, to control condition (i) it is enough
+        * to check just whether all the queues with requests waiting
+        * for completion also have the same weight.
         *
         * Not checking condition (ii) evidently exposes bfqq to the
         * risk of getting less throughput than its fair share.
@@ -3607,10 +3638,11 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
         * bfqq is weight-raised is checked explicitly here. More
         * precisely, the compound condition below takes into account
         * also the fact that, even if bfqq is being weight-raised,
-        * the scenario is still symmetric if all active queues happen
-        * to be weight-raised. Actually, we should be even more
-        * precise here, and differentiate between interactive weight
-        * raising and soft real-time weight raising.
+        * the scenario is still symmetric if all queues with requests
+        * waiting for completion happen to be
+        * weight-raised. Actually, we should be even more precise
+        * here, and differentiate between interactive weight raising
+        * and soft real-time weight raising.
         *
         * As a side note, it is worth considering that the above
         * device-idling countermeasures may however fail in the
@@ -5417,7 +5449,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
        bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
 
        bfqd->queue_weights_tree = RB_ROOT;
-       bfqd->num_active_groups = 0;
+       bfqd->num_groups_with_pending_reqs = 0;
 
        INIT_LIST_HEAD(&bfqd->active_list);
        INIT_LIST_HEAD(&bfqd->idle_list);
index 77651d817ecd36fe59827f2aa55f9c4ec5ffb979..0b02bf302de07706fbfdc5b5ef66da2545e01eee 100644 (file)
@@ -196,6 +196,9 @@ struct bfq_entity {
 
        /* flag, set to request a weight, ioprio or ioprio_class change  */
        int prio_changed;
+
+       /* flag, set if the entity is counted in groups_with_pending_reqs */
+       bool in_groups_with_pending_reqs;
 };
 
 struct bfq_group;
@@ -448,10 +451,54 @@ struct bfq_data {
         * bfq_weights_tree_[add|remove] for further details).
         */
        struct rb_root queue_weights_tree;
+
        /*
-        * number of groups with requests still waiting for completion
+        * Number of groups with at least one descendant process that
+        * has at least one request waiting for completion. Note that
+        * this accounts for also requests already dispatched, but not
+        * yet completed. Therefore this number of groups may differ
+        * (be larger) than the number of active groups, as a group is
+        * considered active only if its corresponding entity has
+        * descendant queues with at least one request queued. This
+        * number is used to decide whether a scenario is symmetric.
+        * For a detailed explanation see comments on the computation
+        * of the variable asymmetric_scenario in the function
+        * bfq_better_to_idle().
+        *
+        * However, it is hard to compute this number exactly, for
+        * groups with multiple descendant processes. Consider a group
+        * that is inactive, i.e., that has no descendant process with
+        * pending I/O inside BFQ queues. Then suppose that
+        * num_groups_with_pending_reqs is still accounting for this
+        * group, because the group has descendant processes with some
+        * I/O request still in flight. num_groups_with_pending_reqs
+        * should be decremented when the in-flight request of the
+        * last descendant process is finally completed (assuming that
+        * nothing else has changed for the group in the meantime, in
+        * terms of composition of the group and active/inactive state of child
+        * groups and processes). To accomplish this, an additional
+        * pending-request counter must be added to entities, and must
+        * be updated correctly. To avoid this additional field and operations,
+        * we resort to the following tradeoff between simplicity and
+        * accuracy: for an inactive group that is still counted in
+        * num_groups_with_pending_reqs, we decrement
+        * num_groups_with_pending_reqs when the first descendant
+        * process of the group remains with no request waiting for
+        * completion.
+        *
+        * Even this simpler decrement strategy requires a little
+        * carefulness: to avoid multiple decrements, we flag a group,
+        * more precisely an entity representing a group, as still
+        * counted in num_groups_with_pending_reqs when it becomes
+        * inactive. Then, when the first descendant queue of the
+        * entity remains with no request waiting for completion,
+        * num_groups_with_pending_reqs is decremented, and this flag
+        * is reset. After this flag is reset for the entity,
+        * num_groups_with_pending_reqs won't be decremented any
+        * longer in case a new descendant queue of the entity remains
+        * with no request waiting for completion.
         */
-       unsigned int num_active_groups;
+       unsigned int num_groups_with_pending_reqs;
 
        /*
         * Number of bfq_queues containing requests (including the
index 4b0d5fb6916005571d4d4b9885e5a24d194e7a7d..63e0f12be7c98fe7770eb9f1e817f5319690c392 100644 (file)
@@ -1012,7 +1012,10 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
                        container_of(entity, struct bfq_group, entity);
                struct bfq_data *bfqd = bfqg->bfqd;
 
-               bfqd->num_active_groups++;
+               if (!entity->in_groups_with_pending_reqs) {
+                       entity->in_groups_with_pending_reqs = true;
+                       bfqd->num_groups_with_pending_reqs++;
+               }
        }
 #endif
 
index 7f478ae288af70d0246397270ad43d18fc7ff1f1..b645275dfe5f22ea9ce39be7757e219388cfc093 100644 (file)
@@ -1831,7 +1831,7 @@ insert:
        if (bypass_insert)
                return BLK_STS_RESOURCE;
 
-       blk_mq_sched_insert_request(rq, false, run_queue, false);
+       blk_mq_request_bypass_insert(rq, run_queue);
        return BLK_STS_OK;
 }
 
@@ -1847,7 +1847,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
        ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
        if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
-               blk_mq_sched_insert_request(rq, false, true, false);
+               blk_mq_request_bypass_insert(rq, true);
        else if (ret != BLK_STS_OK)
                blk_mq_end_request(rq, ret);
 
@@ -1881,7 +1881,8 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                if (ret != BLK_STS_OK) {
                        if (ret == BLK_STS_RESOURCE ||
                                        ret == BLK_STS_DEV_RESOURCE) {
-                               list_add(&rq->queuelist, list);
+                               blk_mq_request_bypass_insert(rq,
+                                                       list_empty(list));
                                break;
                        }
                        blk_mq_end_request(rq, ret);
index f7a235db56aaa78ee8cfa05ef391ff0e09302aba..05c91eb10ca1fd97fde1dd0593d975b99b798e1c 100644 (file)
@@ -1812,7 +1812,7 @@ config CRYPTO_USER_API_AEAD
          cipher algorithms.
 
 config CRYPTO_STATS
-       bool "Crypto usage statistics for User-space"
+       bool
        help
          This option enables the gathering of crypto stats.
          This will collect:
index b761b1f9c6ca161c8eb3a9340ab50b69374671bc..dd5f332fd5668985c9e904b35ad56d47b34ed383 100644 (file)
@@ -140,9 +140,8 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
        spawn = skcipher_instance_ctx(inst);
        err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
                                CRYPTO_ALG_TYPE_MASK);
-       crypto_mod_put(alg);
        if (err)
-               goto err_free_inst;
+               goto err_put_alg;
 
        err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg);
        if (err)
@@ -174,12 +173,15 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
        err = skcipher_register_instance(tmpl, inst);
        if (err)
                goto err_drop_spawn;
+       crypto_mod_put(alg);
 
 out:
        return err;
 
 err_drop_spawn:
        crypto_drop_spawn(spawn);
+err_put_alg:
+       crypto_mod_put(alg);
 err_free_inst:
        kfree(inst);
        goto out;
index a0d68c09e1b9c53dd9eb4fb9bd08238d24b70d44..20987d0e09d89ceafbd51d3b61b321dc58a56862 100644 (file)
@@ -286,9 +286,8 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
        spawn = skcipher_instance_ctx(inst);
        err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
                                CRYPTO_ALG_TYPE_MASK);
-       crypto_mod_put(alg);
        if (err)
-               goto err_free_inst;
+               goto err_put_alg;
 
        err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
        if (err)
@@ -317,12 +316,15 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
        err = skcipher_register_instance(tmpl, inst);
        if (err)
                goto err_drop_spawn;
+       crypto_mod_put(alg);
 
 out:
        return err;
 
 err_drop_spawn:
        crypto_drop_spawn(spawn);
+err_put_alg:
+       crypto_mod_put(alg);
 err_free_inst:
        kfree(inst);
        goto out;
index ef802f6e964218f06d00b035fc66d960cd4ce700..8aa10144407c04f936061c51d823d932660e2caa 100644 (file)
@@ -244,9 +244,8 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
        spawn = skcipher_instance_ctx(inst);
        err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
                                CRYPTO_ALG_TYPE_MASK);
-       crypto_mod_put(alg);
        if (err)
-               goto err_free_inst;
+               goto err_put_alg;
 
        err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg);
        if (err)
@@ -275,12 +274,15 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
        err = skcipher_register_instance(tmpl, inst);
        if (err)
                goto err_drop_spawn;
+       crypto_mod_put(alg);
 
 out:
        return err;
 
 err_drop_spawn:
        crypto_drop_spawn(spawn);
+err_put_alg:
+       crypto_mod_put(alg);
 err_free_inst:
        kfree(inst);
        goto out;
index 14d9f5bea0151c39e706ebfb4ecddb1962fdcc33..5912d30020c7100025dbab0b8cccf09697d1580a 100644 (file)
@@ -1308,7 +1308,7 @@ static ssize_t scrub_store(struct device *dev,
        if (nd_desc) {
                struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
-               rc = acpi_nfit_ars_rescan(acpi_desc, 0);
+               rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
        }
        device_unlock(dev);
        if (rc)
index a7f5202a48152a42e6b2b7ab5835289d8cb6fcca..b8c3f9e6af8994820c30b40889154f87511014e0 100644 (file)
@@ -4602,6 +4602,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "SSD*INTEL*",                 NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Samsung*SSD*",               NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "SAMSUNG*SSD*",               NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "SAMSUNG*MZ7KM*",             NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "ST[1248][0248]0[FH]*",       NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
 
        /*
index ad8d483a35cd5c16d3cbae4019dd345e3c7bb69b..ca7d37e2c7be6bb6aba2a7d4e1459d8c4369fcca 100644 (file)
@@ -183,7 +183,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
                pr_err("CLK %d has invalid pointer %p\n", id, clk);
                return;
        }
-       if (id > unit->nr_clks) {
+       if (id >= unit->nr_clks) {
                pr_err("CLK %d is invalid\n", id);
                return;
        }
index 9781b1bf599884d6ae37b06dea03453dc8caac35..9235a331b588068ac717ed66eb0c3db5b89f6f9f 100644 (file)
@@ -200,11 +200,11 @@ static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec,
        unsigned int idx = clkspec->args[1];
 
        if (type == CP110_CLK_TYPE_CORE) {
-               if (idx > CP110_MAX_CORE_CLOCKS)
+               if (idx >= CP110_MAX_CORE_CLOCKS)
                        return ERR_PTR(-EINVAL);
                return clk_data->hws[idx];
        } else if (type == CP110_CLK_TYPE_GATABLE) {
-               if (idx > CP110_MAX_GATABLE_CLOCKS)
+               if (idx >= CP110_MAX_GATABLE_CLOCKS)
                        return ERR_PTR(-EINVAL);
                return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx];
        }
index db9b2471ac401fbb12c02ffb537c64fe718f430d..0a48ed56833b4b554a14c3d71b71edc50a72f348 100644 (file)
@@ -191,6 +191,22 @@ int qcom_cc_register_sleep_clk(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk);
 
+/* Drop 'protected-clocks' from the list of clocks to register */
+static void qcom_cc_drop_protected(struct device *dev, struct qcom_cc *cc)
+{
+       struct device_node *np = dev->of_node;
+       struct property *prop;
+       const __be32 *p;
+       u32 i;
+
+       of_property_for_each_u32(np, "protected-clocks", prop, p, i) {
+               if (i >= cc->num_rclks)
+                       continue;
+
+               cc->rclks[i] = NULL;
+       }
+}
+
 static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec,
                                         void *data)
 {
@@ -251,6 +267,8 @@ int qcom_cc_really_probe(struct platform_device *pdev,
        cc->rclks = rclks;
        cc->num_rclks = num_clks;
 
+       qcom_cc_drop_protected(dev, cc);
+
        for (i = 0; i < num_clks; i++) {
                if (!rclks[i])
                        continue;
index 9d7d297f0ea8d9b0fbc5c5e5a02c172b0ae14064..f65cc0ff76abdb630b0694eb7c3babcc2c46ff71 100644 (file)
@@ -128,7 +128,7 @@ static const struct zynqmp_eemi_ops *eemi_ops;
  */
 static inline int zynqmp_is_valid_clock(u32 clk_id)
 {
-       if (clk_id > clock_max_idx)
+       if (clk_id >= clock_max_idx)
                return -ENODEV;
 
        return clock[clk_id].valid;
@@ -279,6 +279,9 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id,
        qdata.arg1 = clk_id;
 
        ret = eemi_ops->query_data(qdata, ret_payload);
+       if (ret)
+               return ERR_PTR(ret);
+
        mult = ret_payload[1];
        div = ret_payload[2];
 
index d0c3e50b39fbd8ab5f15583113ab0b090af2da70..1fc488e90f363ae76c7901e2070fa3c1b77e8ce2 100644 (file)
@@ -1059,12 +1059,12 @@ static void dwc_issue_pending(struct dma_chan *chan)
 /*
  * Program FIFO size of channels.
  *
- * By default full FIFO (1024 bytes) is assigned to channel 0. Here we
+ * By default full FIFO (512 bytes) is assigned to channel 0. Here we
  * slice FIFO on equal parts between channels.
  */
 static void idma32_fifo_partition(struct dw_dma *dw)
 {
-       u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
+       u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
                    IDMA32C_FP_UPDATE;
        u64 fifo_partition = 0;
 
@@ -1077,7 +1077,7 @@ static void idma32_fifo_partition(struct dw_dma *dw)
        /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
        fifo_partition |= value << 32;
 
-       /* Program FIFO Partition registers - 128 bytes for each channel */
+       /* Program FIFO Partition registers - 64 bytes per channel */
        idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
        idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
 }
index b4ec2d20e66167786939ae867de0d93378ae4054..cb1b44d78a1f23ea19ad6a53982e997f7d9f25f9 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
@@ -33,6 +32,7 @@
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
+#include <linux/workqueue.h>
 
 #include <asm/irq.h>
 #include <linux/platform_data/dma-imx-sdma.h>
@@ -376,7 +376,7 @@ struct sdma_channel {
        u32                             shp_addr, per_addr;
        enum dma_status                 status;
        struct imx_dma_data             data;
-       struct dma_pool                 *bd_pool;
+       struct work_struct              terminate_worker;
 };
 
 #define IMX_DMA_SG_LOOP                BIT(0)
@@ -1027,31 +1027,49 @@ static int sdma_disable_channel(struct dma_chan *chan)
 
        return 0;
 }
-
-static int sdma_disable_channel_with_delay(struct dma_chan *chan)
+static void sdma_channel_terminate_work(struct work_struct *work)
 {
-       struct sdma_channel *sdmac = to_sdma_chan(chan);
+       struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
+                                                 terminate_worker);
        unsigned long flags;
        LIST_HEAD(head);
 
-       sdma_disable_channel(chan);
-       spin_lock_irqsave(&sdmac->vc.lock, flags);
-       vchan_get_all_descriptors(&sdmac->vc, &head);
-       sdmac->desc = NULL;
-       spin_unlock_irqrestore(&sdmac->vc.lock, flags);
-       vchan_dma_desc_free_list(&sdmac->vc, &head);
-
        /*
         * According to NXP R&D team a delay of one BD SDMA cost time
         * (maximum is 1ms) should be added after disable of the channel
         * bit, to ensure SDMA core has really been stopped after SDMA
         * clients call .device_terminate_all.
         */
-       mdelay(1);
+       usleep_range(1000, 2000);
+
+       spin_lock_irqsave(&sdmac->vc.lock, flags);
+       vchan_get_all_descriptors(&sdmac->vc, &head);
+       sdmac->desc = NULL;
+       spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+       vchan_dma_desc_free_list(&sdmac->vc, &head);
+}
+
+static int sdma_disable_channel_async(struct dma_chan *chan)
+{
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
+
+       sdma_disable_channel(chan);
+
+       if (sdmac->desc)
+               schedule_work(&sdmac->terminate_worker);
 
        return 0;
 }
 
+static void sdma_channel_synchronize(struct dma_chan *chan)
+{
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
+
+       vchan_synchronize(&sdmac->vc);
+
+       flush_work(&sdmac->terminate_worker);
+}
+
 static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
 {
        struct sdma_engine *sdma = sdmac->sdma;
@@ -1192,10 +1210,11 @@ out:
 
 static int sdma_alloc_bd(struct sdma_desc *desc)
 {
+       u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
        int ret = 0;
 
-       desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT,
-                                 &desc->bd_phys);
+       desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
+                                       GFP_NOWAIT);
        if (!desc->bd) {
                ret = -ENOMEM;
                goto out;
@@ -1206,7 +1225,9 @@ out:
 
 static void sdma_free_bd(struct sdma_desc *desc)
 {
-       dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
+       u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+
+       dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
 }
 
 static void sdma_desc_free(struct virt_dma_desc *vd)
@@ -1272,10 +1293,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
        if (ret)
                goto disable_clk_ahb;
 
-       sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
-                               sizeof(struct sdma_buffer_descriptor),
-                               32, 0);
-
        return 0;
 
 disable_clk_ahb:
@@ -1290,7 +1307,9 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
 
-       sdma_disable_channel_with_delay(chan);
+       sdma_disable_channel_async(chan);
+
+       sdma_channel_synchronize(chan);
 
        if (sdmac->event_id0)
                sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1304,9 +1323,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
        clk_disable(sdma->clk_ipg);
        clk_disable(sdma->clk_ahb);
-
-       dma_pool_destroy(sdmac->bd_pool);
-       sdmac->bd_pool = NULL;
 }
 
 static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
@@ -1999,6 +2015,8 @@ static int sdma_probe(struct platform_device *pdev)
 
                sdmac->channel = i;
                sdmac->vc.desc_free = sdma_desc_free;
+               INIT_WORK(&sdmac->terminate_worker,
+                               sdma_channel_terminate_work);
                /*
                 * Add the channel to the DMAC list. Do not add channel 0 though
                 * because we need it internally in the SDMA driver. This also means
@@ -2050,7 +2068,8 @@ static int sdma_probe(struct platform_device *pdev)
        sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
        sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
        sdma->dma_device.device_config = sdma_config;
-       sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
+       sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
+       sdma->dma_device.device_synchronize = sdma_channel_synchronize;
        sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
        sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
        sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
index 1497da3677109c78949cb91adf0907a30eb9b32f..e507ec36c0d3dfa107ccba439551390b9ace1add 100644 (file)
@@ -723,8 +723,22 @@ static int cppi41_stop_chan(struct dma_chan *chan)
 
        desc_phys = lower_32_bits(c->desc_phys);
        desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
-       if (!cdd->chan_busy[desc_num])
+       if (!cdd->chan_busy[desc_num]) {
+               struct cppi41_channel *cc, *_ct;
+
+               /*
+                * channels might still be in the pendling list if
+                * cppi41_dma_issue_pending() is called after
+                * cppi41_runtime_suspend() is called
+                */
+               list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
+                       if (cc != c)
+                               continue;
+                       list_del(&cc->node);
+                       break;
+               }
                return 0;
+       }
 
        ret = cppi41_tear_down_chan(c);
        if (ret)
index 71d014edd16760d6c37dad72836ed9e13cbfffba..2c22836d3ffd5f060ae2cde22dbd462ef977ee0b 100644 (file)
@@ -168,7 +168,7 @@ static int sirf_set_active(struct sirf_data *data, bool active)
        else
                timeout = SIRF_HIBERNATE_TIMEOUT;
 
-       while (retries-- > 0) {
+       do {
                sirf_pulse_on_off(data);
                ret = sirf_wait_for_power_state(data, active, timeout);
                if (ret < 0) {
@@ -179,9 +179,9 @@ static int sirf_set_active(struct sirf_data *data, bool active)
                }
 
                break;
-       }
+       } while (retries--);
 
-       if (retries == 0)
+       if (retries < 0)
                return -ETIMEDOUT;
 
        return 0;
index 104b2e0d893bdad124d8f00405d3287b7bc24992..b0fc116296cb3bff55f6b95b432fbc018a29bbde 100644 (file)
@@ -233,7 +233,7 @@ enum amdgpu_kiq_irq {
 
 #define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
 #define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 20
+#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
 
 int amdgpu_device_ip_set_clockgating_state(void *dev,
                                           enum amd_ip_block_type block_type,
index f9b54236102d58421d179f230d6968a3dbd39d04..95f4c4139fc60a078d651b8164d11b0befcf766b 100644 (file)
@@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
        [AMDGPU_HW_IP_UVD_ENC]  =       1,
        [AMDGPU_HW_IP_VCN_DEC]  =       1,
        [AMDGPU_HW_IP_VCN_ENC]  =       1,
+       [AMDGPU_HW_IP_VCN_JPEG] =       1,
 };
 
 static int amdgput_ctx_total_num_entities(void)
index 81732a84c2ab090af4e2f834e2223c0eeabe2bcc..8f3d44e5e78785a18089204113636553de40064d 100644 (file)
@@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
        if (!info->return_size || !info->return_pointer)
                return -EINVAL;
 
-       /* Ensure IB tests are run on ring */
-       flush_delayed_work(&adev->late_init_work);
-
        switch (info->query) {
        case AMDGPU_INFO_ACCEL_WORKING:
                ui32 = adev->accel_working;
@@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        struct amdgpu_fpriv *fpriv;
        int r, pasid;
 
+       /* Ensure IB tests are run on ring */
+       flush_delayed_work(&adev->late_init_work);
+
        file_priv->driver_priv = NULL;
 
        r = pm_runtime_get_sync(dev->dev);
index 1d3265c97b704b5a403cca7721818ac91dad6c4c..747c068379dc79b5408525dda7d2215b9b128043 100644 (file)
@@ -56,6 +56,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -224,13 +227,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
                chip_name = "tonga";
                break;
        case CHIP_POLARIS11:
-               chip_name = "polaris11";
+               if (((adev->pdev->device == 0x67ef) &&
+                    ((adev->pdev->revision == 0xe0) ||
+                     (adev->pdev->revision == 0xe5))) ||
+                   ((adev->pdev->device == 0x67ff) &&
+                    ((adev->pdev->revision == 0xcf) ||
+                     (adev->pdev->revision == 0xef) ||
+                     (adev->pdev->revision == 0xff))))
+                       chip_name = "polaris11_k";
+               else if ((adev->pdev->device == 0x67ef) &&
+                        (adev->pdev->revision == 0xe2))
+                       chip_name = "polaris11_k";
+               else
+                       chip_name = "polaris11";
                break;
        case CHIP_POLARIS10:
-               chip_name = "polaris10";
+               if ((adev->pdev->device == 0x67df) &&
+                   ((adev->pdev->revision == 0xe1) ||
+                    (adev->pdev->revision == 0xf7)))
+                       chip_name = "polaris10_k";
+               else
+                       chip_name = "polaris10";
                break;
        case CHIP_POLARIS12:
-               chip_name = "polaris12";
+               if (((adev->pdev->device == 0x6987) &&
+                    ((adev->pdev->revision == 0xc0) ||
+                     (adev->pdev->revision == 0xc3))) ||
+                   ((adev->pdev->device == 0x6981) &&
+                    ((adev->pdev->revision == 0x00) ||
+                     (adev->pdev->revision == 0x01) ||
+                     (adev->pdev->revision == 0x10))))
+                       chip_name = "polaris12_k";
+               else
+                       chip_name = "polaris12";
                break;
        case CHIP_FIJI:
        case CHIP_CARRIZO:
@@ -337,7 +366,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
        const struct mc_firmware_header_v1_0 *hdr;
        const __le32 *fw_data = NULL;
        const __le32 *io_mc_regs = NULL;
-       u32 data, vbios_version;
+       u32 data;
        int i, ucode_size, regs_size;
 
        /* Skip MC ucode loading on SR-IOV capable boards.
@@ -348,13 +377,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
        if (amdgpu_sriov_bios(adev))
                return 0;
 
-       WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
-       data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
-       vbios_version = data & 0xf;
-
-       if (vbios_version == 0)
-               return 0;
-
        if (!adev->gmc.fw)
                return -EINVAL;
 
index eae90922fdbe0f4356be31c4fd16eeb6846409a2..322e09b5b44894183d2c8aab92b1319f634a4fd5 100644 (file)
@@ -48,6 +48,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
+static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
 
 /**
  * vcn_v1_0_early_init - set function pointers
@@ -222,7 +223,7 @@ static int vcn_v1_0_hw_fini(void *handle)
        struct amdgpu_ring *ring = &adev->vcn.ring_dec;
 
        if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
-               vcn_v1_0_stop(adev);
+               vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
        ring->ready = false;
 
index ca925200fe09240ae4f96a2ef7f633726b4cf883..5a6edf65c9eaebd958104d4d0dd8216281ffaccc 100644 (file)
@@ -2554,9 +2554,9 @@ static void fill_audio_info(struct audio_info *audio_info,
 
        cea_revision = drm_connector->display_info.cea_rev;
 
-       strncpy(audio_info->display_name,
+       strscpy(audio_info->display_name,
                edid_caps->display_name,
-               AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
+               AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
 
        if (cea_revision >= 3) {
                audio_info->mode_count = edid_caps->audio_mode_count;
@@ -3042,6 +3042,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
                state->underscan_enable = false;
                state->underscan_hborder = 0;
                state->underscan_vborder = 0;
+               state->max_bpc = 8;
 
                __drm_atomic_helper_connector_reset(connector, &state->base);
        }
@@ -3063,6 +3064,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
 
        new_state->freesync_capable = state->freesync_capable;
        new_state->freesync_enable = state->freesync_enable;
+       new_state->max_bpc = state->max_bpc;
 
        return &new_state->base;
 }
@@ -3650,7 +3652,7 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
        mode->hdisplay = hdisplay;
        mode->vdisplay = vdisplay;
        mode->type &= ~DRM_MODE_TYPE_PREFERRED;
-       strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+       strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
 
        return mode;
 
index b459867a05b202e84a1a59c5ea656fc4b4395b2a..a6bcb90e8419af401bbe4650dc1515f32681cda8 100644 (file)
@@ -2512,6 +2512,8 @@ static void pplib_apply_display_requirements(
                        dc,
                        context->bw.dce.sclk_khz);
 
+       pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
+
        pp_display_cfg->min_engine_clock_deep_sleep_khz
                        = context->bw.dce.sclk_deep_sleep_khz;
 
index 85119c2bdcc8ff2e2bbd54b53e204b095decb2d4..a2a7e0e94aa6b704b015122d413c06d6045b1d00 100644 (file)
@@ -80,7 +80,9 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
        PHM_FUNC_CHECK(hwmgr);
        adev = hwmgr->adev;
 
-       if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)) {
+       /* Skip for suspend/resume case */
+       if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)
+               && adev->in_suspend) {
                pr_info("dpm has been enabled\n");
                return 0;
        }
index 47ac9236973947fb369e5b3652900e9d64ab0d93..0173d04800245b44f88f1f80e8dc3cb50df3bd2c 100644 (file)
@@ -352,6 +352,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
 
        switch (task_id) {
        case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+               ret = phm_pre_display_configuration_changed(hwmgr);
+               if (ret)
+                       return ret;
                ret = phm_set_cpu_power_state(hwmgr);
                if (ret)
                        return ret;
index 91ffb7bc4ee72512f9a31aebbce9eaec3939d9d7..56437866d1206c163f36593e2764bfb6bfd96170 100644 (file)
@@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
        if (skip)
                return 0;
 
-       phm_pre_display_configuration_changed(hwmgr);
-
        phm_display_configuration_changed(hwmgr);
 
        if (hwmgr->ps)
index 88f6b35ea6fee9cb7bdf40df0eeef3b3d71ee233..b61a01f552840d39a0ce7113de3d6e035d84354f 100644 (file)
@@ -3589,8 +3589,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
        }
 
        if (i >= sclk_table->count) {
-               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-               sclk_table->dpm_levels[i-1].value = sclk;
+               if (sclk > sclk_table->dpm_levels[i-1].value) {
+                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+                       sclk_table->dpm_levels[i-1].value = sclk;
+               }
        } else {
        /* TODO: Check SCLK in DAL's minimum clocks
         * in case DeepSleep divider update is required.
@@ -3607,8 +3609,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
        }
 
        if (i >= mclk_table->count) {
-               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-               mclk_table->dpm_levels[i-1].value = mclk;
+               if (mclk > mclk_table->dpm_levels[i-1].value) {
+                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+                       mclk_table->dpm_levels[i-1].value = mclk;
+               }
        }
 
        if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
index e2bc6e0c229f96dde7baf50d2f2189efce4c2c2e..79c86247d0ac0324f2282a3fc2ef46006cefc209 100644 (file)
@@ -3266,8 +3266,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
        }
 
        if (i >= sclk_table->count) {
-               data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-               sclk_table->dpm_levels[i-1].value = sclk;
+               if (sclk > sclk_table->dpm_levels[i-1].value) {
+                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+                       sclk_table->dpm_levels[i-1].value = sclk;
+               }
        }
 
        for (i = 0; i < mclk_table->count; i++) {
@@ -3276,8 +3278,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
        }
 
        if (i >= mclk_table->count) {
-               data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-               mclk_table->dpm_levels[i-1].value = mclk;
+               if (mclk > mclk_table->dpm_levels[i-1].value) {
+                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+                       mclk_table->dpm_levels[i-1].value = mclk;
+               }
        }
 
        if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
index b4eadd47f3a44a22b95ccd12effd99924a493e24..3367dd30cdd0d1c8c8482afb436885383a736103 100644 (file)
@@ -1660,14 +1660,15 @@ static uint32_t vega20_find_highest_dpm_level(
        return i;
 }
 
-static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
 {
        struct vega20_hwmgr *data =
                        (struct vega20_hwmgr *)(hwmgr->backend);
        uint32_t min_freq;
        int ret = 0;
 
-       if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+       if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+          (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
                min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@@ -1676,7 +1677,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+       if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+          (feature_mask & FEATURE_DPM_UCLK_MASK)) {
                min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@@ -1692,7 +1694,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_UVD].enabled) {
+       if (data->smu_features[GNLD_DPM_UVD].enabled &&
+          (feature_mask & FEATURE_DPM_UVD_MASK)) {
                min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1710,7 +1713,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_VCE].enabled) {
+       if (data->smu_features[GNLD_DPM_VCE].enabled &&
+          (feature_mask & FEATURE_DPM_VCE_MASK)) {
                min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1720,7 +1724,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+       if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+          (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
                min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1733,14 +1738,15 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
        return ret;
 }
 
-static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
 {
        struct vega20_hwmgr *data =
                        (struct vega20_hwmgr *)(hwmgr->backend);
        uint32_t max_freq;
        int ret = 0;
 
-       if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+       if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+          (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
                max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1750,7 +1756,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+       if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+          (feature_mask & FEATURE_DPM_UCLK_MASK)) {
                max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1760,7 +1767,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_UVD].enabled) {
+       if (data->smu_features[GNLD_DPM_UVD].enabled &&
+          (feature_mask & FEATURE_DPM_UVD_MASK)) {
                max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1777,7 +1785,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_VCE].enabled) {
+       if (data->smu_features[GNLD_DPM_VCE].enabled &&
+          (feature_mask & FEATURE_DPM_VCE_MASK)) {
                max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1787,7 +1796,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+       if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+          (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
                max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -2126,12 +2136,12 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
                data->dpm_table.mem_table.dpm_state.soft_max_level =
                data->dpm_table.mem_table.dpm_levels[soft_level].value;
 
-       ret = vega20_upload_dpm_min_level(hwmgr);
+       ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload boot level to highest!",
                        return ret);
 
-       ret = vega20_upload_dpm_max_level(hwmgr);
+       ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload dpm max level to highest!",
                        return ret);
@@ -2158,12 +2168,12 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
                data->dpm_table.mem_table.dpm_state.soft_max_level =
                data->dpm_table.mem_table.dpm_levels[soft_level].value;
 
-       ret = vega20_upload_dpm_min_level(hwmgr);
+       ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload boot level to highest!",
                        return ret);
 
-       ret = vega20_upload_dpm_max_level(hwmgr);
+       ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload dpm max level to highest!",
                        return ret);
@@ -2176,12 +2186,12 @@ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 {
        int ret = 0;
 
-       ret = vega20_upload_dpm_min_level(hwmgr);
+       ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload DPM Bootup Levels!",
                        return ret);
 
-       ret = vega20_upload_dpm_max_level(hwmgr);
+       ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload DPM Max Levels!",
                        return ret);
@@ -2239,12 +2249,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
                data->dpm_table.gfx_table.dpm_state.soft_max_level =
                        data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
 
-               ret = vega20_upload_dpm_min_level(hwmgr);
+               ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
                PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload boot level to lowest!",
                        return ret);
 
-               ret = vega20_upload_dpm_max_level(hwmgr);
+               ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
                PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload dpm max level to highest!",
                        return ret);
@@ -2259,12 +2269,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
                data->dpm_table.mem_table.dpm_state.soft_max_level =
                        data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
 
-               ret = vega20_upload_dpm_min_level(hwmgr);
+               ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
                PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload boot level to lowest!",
                        return ret);
 
-               ret = vega20_upload_dpm_max_level(hwmgr);
+               ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
                PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload dpm max level to highest!",
                        return ret);
index 0cd827e11fa20d8af7f038cecb8ea12465014523..de26df0c6044de127422999c669eeb5d68304a66 100644 (file)
@@ -263,6 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
 {
        struct ast_framebuffer *afb = &afbdev->afb;
 
+       drm_crtc_force_disable_all(dev);
        drm_fb_helper_unregister_fbi(&afbdev->helper);
 
        if (afb->obj) {
index 680566d97adcf652b52eb886fe54326d95817d01..10243965ee7c0219737cb6a4e3fa85b728cd1fe4 100644 (file)
@@ -54,7 +54,7 @@
 #define SN_AUX_ADDR_7_0_REG                    0x76
 #define SN_AUX_LENGTH_REG                      0x77
 #define SN_AUX_CMD_REG                         0x78
-#define  AUX_CMD_SEND                          BIT(1)
+#define  AUX_CMD_SEND                          BIT(0)
 #define  AUX_CMD_REQ(x)                                ((x) << 4)
 #define SN_AUX_RDATA_REG(x)                    (0x79 + (x))
 #define SN_SSC_CONFIG_REG                      0x93
index dd852a25d37540fdf9ee6254f08a68b98df3e9a5..9d64f874f965be1a74970997f3be5ec07df2139c 100644 (file)
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
 #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
 static bool drm_leak_fbdev_smem = false;
 module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
-MODULE_PARM_DESC(fbdev_emulation,
+MODULE_PARM_DESC(drm_leak_fbdev_smem,
                 "Allow unsafe leaking fbdev physical smem address [default=false]");
 #endif
 
index 0c4eb4a9ab31f79efff7d6902c542d9a6f69ddd9..51e06defc8d8a0ea7fd2eb770a85dbce94d2b3b7 100644 (file)
@@ -104,6 +104,8 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
 int drm_sysfs_connector_add(struct drm_connector *connector);
 void drm_sysfs_connector_remove(struct drm_connector *connector);
 
+void drm_sysfs_lease_event(struct drm_device *dev);
+
 /* drm_gem.c */
 int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
index 24a177ea54176d91b289da8471cbffce3d4ee120..c61680ad962d9ef3189b476fbec507eac8a2a459 100644 (file)
@@ -296,7 +296,7 @@ void drm_lease_destroy(struct drm_master *master)
 
        if (master->lessor) {
                /* Tell the master to check the lessee list */
-               drm_sysfs_hotplug_event(dev);
+               drm_sysfs_lease_event(dev);
                drm_master_put(&master->lessor);
        }
 
index b3c1daad1169b806271691c7e373900ef6b27e5e..ecb7b33002bb27de0af599702a354e7c241cd6ed 100644 (file)
@@ -301,6 +301,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
        connector->kdev = NULL;
 }
 
+void drm_sysfs_lease_event(struct drm_device *dev)
+{
+       char *event_string = "LEASE=1";
+       char *envp[] = { event_string, NULL };
+
+       DRM_DEBUG("generating lease event\n");
+
+       kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+
 /**
  * drm_sysfs_hotplug_event - generate a DRM uevent
  * @dev: DRM device
index d4530d60767b816605e9edcc2be921c5425e8c63..ca169f013a14efb4bbe5e26609c8152312ec7493 100644 (file)
@@ -1594,7 +1594,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
                                NULL);
 
        drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
-       plane->crtc = crtc;
 
        /* save user friendly CRTC name for later */
        snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
index 96cdf06e7da21d8f5bf576014f149f2efbbc7298..d31d8281424efb371cf04cfe82220ef3959828fa 100644 (file)
@@ -488,8 +488,6 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
 
        drm_encoder_cleanup(drm_enc);
        mutex_destroy(&dpu_enc->enc_lock);
-
-       kfree(dpu_enc);
 }
 
 void dpu_encoder_helper_split_config(
index bfcd165e96dfe98d8f6ad16cdadc19017c3e7042..d743e7ca6a3c8b2e83e5ccc619e0f3d6070ff4da 100644 (file)
@@ -216,7 +216,7 @@ static const struct dpu_format dpu_format_map[] = {
        INTERLEAVED_RGB_FMT(XBGR8888,
                COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
                C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-               true, 4, 0,
+               false, 4, 0,
                DPU_FETCH_LINEAR, 1),
 
        INTERLEAVED_RGB_FMT(RGBA8888,
index 4c03f0b7343ed655c60111be4d09249bde463b28..41bec570c51848f24f378d0982cb987860fd0047 100644 (file)
@@ -39,6 +39,8 @@
 #define DSI_PIXEL_PLL_CLK              1
 #define NUM_PROVIDED_CLKS              2
 
+#define VCO_REF_CLK_RATE               19200000
+
 struct dsi_pll_regs {
        u32 pll_prop_gain_rate;
        u32 pll_lockdet_rate;
@@ -316,7 +318,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
            parent_rate);
 
        pll_10nm->vco_current_rate = rate;
-       pll_10nm->vco_ref_clk_rate = parent_rate;
+       pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
 
        dsi_pll_setup_config(pll_10nm);
 
index c79659ca570655da77888052fc47a16bc53cf409..adbdce3aeda0039f7779059687774924c7d6561f 100644 (file)
@@ -332,6 +332,12 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
                goto fail;
        }
 
+       ret = msm_hdmi_hpd_enable(hdmi->connector);
+       if (ret < 0) {
+               DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
+               goto fail;
+       }
+
        encoder->bridge = hdmi->bridge;
 
        priv->bridges[priv->num_bridges++]       = hdmi->bridge;
@@ -571,7 +577,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        struct drm_device *drm = dev_get_drvdata(master);
        struct msm_drm_private *priv = drm->dev_private;
-       static struct hdmi_platform_config *hdmi_cfg;
+       struct hdmi_platform_config *hdmi_cfg;
        struct hdmi *hdmi;
        struct device_node *of_node = dev->of_node;
        int i, err;
index accc9a61611d35bf9718d66fe09ce6c6e40a549f..5c5df6ab2a573421726a5ca69fbf619841c8303a 100644 (file)
@@ -245,6 +245,7 @@ void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
 
 void msm_hdmi_connector_irq(struct drm_connector *connector);
 struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
+int msm_hdmi_hpd_enable(struct drm_connector *connector);
 
 /*
  * i2c adapter for ddc:
index e9c9a0af508e8c41bc12e91fc13d2f23b5041f33..30e908dfded7ed888267d2c7a4a8211764fc6a22 100644 (file)
@@ -167,8 +167,9 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
        }
 }
 
-static int hpd_enable(struct hdmi_connector *hdmi_connector)
+int msm_hdmi_hpd_enable(struct drm_connector *connector)
 {
+       struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
        struct hdmi *hdmi = hdmi_connector->hdmi;
        const struct hdmi_platform_config *config = hdmi->config;
        struct device *dev = &hdmi->pdev->dev;
@@ -450,7 +451,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
 {
        struct drm_connector *connector = NULL;
        struct hdmi_connector *hdmi_connector;
-       int ret;
 
        hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
        if (!hdmi_connector)
@@ -471,12 +471,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
 
-       ret = hpd_enable(hdmi_connector);
-       if (ret) {
-               dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
-               return ERR_PTR(ret);
-       }
-
        drm_connector_attach_encoder(connector, hdmi->encoder);
 
        return connector;
index 4bcdeca7479db6a2481c2b6e8ad38defe7f280de..2088a20eb27024f8e868d17b1eb04343a3ad37bb 100644 (file)
@@ -34,7 +34,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
                if (!new_crtc_state->active)
                        continue;
 
+               if (drm_crtc_vblank_get(crtc))
+                       continue;
+
                kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+
+               drm_crtc_vblank_put(crtc);
        }
 }
 
index f0da0d3c8a80f7cf9ab5082095aed6df3e3c9529..d756436c1fcd3293f40db2d9efda018603c496e4 100644 (file)
@@ -84,7 +84,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
-               return ret;
+               goto free_priv;
 
        pm_runtime_get_sync(&gpu->pdev->dev);
        show_priv->state = gpu->funcs->gpu_state_get(gpu);
@@ -94,13 +94,20 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
 
        if (IS_ERR(show_priv->state)) {
                ret = PTR_ERR(show_priv->state);
-               kfree(show_priv);
-               return ret;
+               goto free_priv;
        }
 
        show_priv->dev = dev;
 
-       return single_open(file, msm_gpu_show, show_priv);
+       ret = single_open(file, msm_gpu_show, show_priv);
+       if (ret)
+               goto free_priv;
+
+       return 0;
+
+free_priv:
+       kfree(show_priv);
+       return ret;
 }
 
 static const struct file_operations msm_gpu_fops = {
index 4904d0d414094f7f7c6bdb79226bf7430f5f99aa..dcff812c63d0739ee3dd867e63a503f1bee5e1f1 100644 (file)
@@ -553,17 +553,18 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
                        kthread_run(kthread_worker_fn,
                                &priv->disp_thread[i].worker,
                                "crtc_commit:%d", priv->disp_thread[i].crtc_id);
-               ret = sched_setscheduler(priv->disp_thread[i].thread,
-                                                       SCHED_FIFO, &param);
-               if (ret)
-                       pr_warn("display thread priority update failed: %d\n",
-                                                                       ret);
-
                if (IS_ERR(priv->disp_thread[i].thread)) {
                        dev_err(dev, "failed to create crtc_commit kthread\n");
                        priv->disp_thread[i].thread = NULL;
+                       goto err_msm_uninit;
                }
 
+               ret = sched_setscheduler(priv->disp_thread[i].thread,
+                                        SCHED_FIFO, &param);
+               if (ret)
+                       dev_warn(dev, "disp_thread set priority failed: %d\n",
+                                ret);
+
                /* initialize event thread */
                priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
                kthread_init_worker(&priv->event_thread[i].worker);
@@ -572,6 +573,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
                        kthread_run(kthread_worker_fn,
                                &priv->event_thread[i].worker,
                                "crtc_event:%d", priv->event_thread[i].crtc_id);
+               if (IS_ERR(priv->event_thread[i].thread)) {
+                       dev_err(dev, "failed to create crtc_event kthread\n");
+                       priv->event_thread[i].thread = NULL;
+                       goto err_msm_uninit;
+               }
+
                /**
                 * event thread should also run at same priority as disp_thread
                 * because it is handling frame_done events. A lower priority
@@ -580,34 +587,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
                 * failure at crtc commit level.
                 */
                ret = sched_setscheduler(priv->event_thread[i].thread,
-                                                       SCHED_FIFO, &param);
+                                        SCHED_FIFO, &param);
                if (ret)
-                       pr_warn("display event thread priority update failed: %d\n",
-                                                                       ret);
-
-               if (IS_ERR(priv->event_thread[i].thread)) {
-                       dev_err(dev, "failed to create crtc_event kthread\n");
-                       priv->event_thread[i].thread = NULL;
-               }
-
-               if ((!priv->disp_thread[i].thread) ||
-                               !priv->event_thread[i].thread) {
-                       /* clean up previously created threads if any */
-                       for ( ; i >= 0; i--) {
-                               if (priv->disp_thread[i].thread) {
-                                       kthread_stop(
-                                               priv->disp_thread[i].thread);
-                                       priv->disp_thread[i].thread = NULL;
-                               }
-
-                               if (priv->event_thread[i].thread) {
-                                       kthread_stop(
-                                               priv->event_thread[i].thread);
-                                       priv->event_thread[i].thread = NULL;
-                               }
-                       }
-                       goto err_msm_uninit;
-               }
+                       dev_warn(dev, "event_thread set priority failed:%d\n",
+                                ret);
        }
 
        ret = drm_vblank_init(ddev, priv->num_crtcs);
index 7a7923e6220da89b252997d9f9bb674e0b7802dd..6942604ad9a8b832b8425f90012ba688da3983aa 100644 (file)
@@ -317,6 +317,9 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
        uint32_t *ptr;
        int ret = 0;
 
+       if (!nr_relocs)
+               return 0;
+
        if (offset % 4) {
                DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
                return -EINVAL;
@@ -410,7 +413,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        struct msm_file_private *ctx = file->driver_priv;
        struct msm_gem_submit *submit;
        struct msm_gpu *gpu = priv->gpu;
-       struct dma_fence *in_fence = NULL;
        struct sync_file *sync_file = NULL;
        struct msm_gpu_submitqueue *queue;
        struct msm_ringbuffer *ring;
@@ -443,6 +445,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        ring = gpu->rb[queue->prio];
 
        if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+               struct dma_fence *in_fence;
+
                in_fence = sync_file_get_fence(args->fence_fd);
 
                if (!in_fence)
@@ -452,11 +456,13 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                 * Wait if the fence is from a foreign context, or if the fence
                 * array contains any fence from a foreign context.
                 */
-               if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
+               ret = 0;
+               if (!dma_fence_match_context(in_fence, ring->fctx->context))
                        ret = dma_fence_wait(in_fence, true);
-                       if (ret)
-                               return ret;
-               }
+
+               dma_fence_put(in_fence);
+               if (ret)
+                       return ret;
        }
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -582,8 +588,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        }
 
 out:
-       if (in_fence)
-               dma_fence_put(in_fence);
        submit_cleanup(submit);
        if (ret)
                msm_gem_submit_free(submit);
index 11aac83370664f45ce5c8a39e6bb6b284581ae40..2b7c8946adba97983a79a6f78dae85ffc269102c 100644 (file)
@@ -345,6 +345,10 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 {
        struct msm_gpu_state *state;
 
+       /* Check if the target supports capturing crash state */
+       if (!gpu->funcs->gpu_state_get)
+               return;
+
        /* Only save one crash state at a time */
        if (gpu->crashstate)
                return;
@@ -434,10 +438,9 @@ static void recover_worker(struct work_struct *work)
        if (submit) {
                struct task_struct *task;
 
-               rcu_read_lock();
-               task = pid_task(submit->pid, PIDTYPE_PID);
+               task = get_pid_task(submit->pid, PIDTYPE_PID);
                if (task) {
-                       comm = kstrdup(task->comm, GFP_ATOMIC);
+                       comm = kstrdup(task->comm, GFP_KERNEL);
 
                        /*
                         * So slightly annoying, in other paths like
@@ -450,10 +453,10 @@ static void recover_worker(struct work_struct *work)
                         * about the submit going away.
                         */
                        mutex_unlock(&dev->struct_mutex);
-                       cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
+                       cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+                       put_task_struct(task);
                        mutex_lock(&dev->struct_mutex);
                }
-               rcu_read_unlock();
 
                if (comm && cmd) {
                        dev_err(dev->dev, "%s: offending task: %s (%s)\n",
index b23d33622f374b0ce88791914b53cb126899676b..2a90aa4caec081b2349ce115d77f4225d22ab3a4 100644 (file)
@@ -66,7 +66,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
 //     pm_runtime_get_sync(mmu->dev);
        ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
 //     pm_runtime_put_sync(mmu->dev);
-       WARN_ON(ret < 0);
+       WARN_ON(!ret);
 
        return (ret == len) ? 0 : -EINVAL;
 }
index cca9334584391d97a4026f6ae48bfdb8d7f12ae9..0c2c8d2c631f309791a91b388d7d370c969e2f20 100644 (file)
@@ -316,10 +316,11 @@ static void snapshot_buf(struct msm_rd_state *rd,
                uint64_t iova, uint32_t size)
 {
        struct msm_gem_object *obj = submit->bos[idx].obj;
+       unsigned offset = 0;
        const char *buf;
 
        if (iova) {
-               buf += iova - submit->bos[idx].iova;
+               offset = iova - submit->bos[idx].iova;
        } else {
                iova = submit->bos[idx].iova;
                size = obj->base.size;
@@ -340,6 +341,8 @@ static void snapshot_buf(struct msm_rd_state *rd,
        if (IS_ERR(buf))
                return;
 
+       buf += offset;
+
        rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
 
        msm_gem_put_vaddr(&obj->base);
index 1f8161b041be6c592f826820818ea352ae3fdfe2..465120809eb3bb621343002b4f149a3c44787eca 100644 (file)
@@ -177,6 +177,7 @@ static int panel_dpi_probe(struct platform_device *pdev)
        dssdev->type = OMAP_DISPLAY_TYPE_DPI;
        dssdev->owner = THIS_MODULE;
        dssdev->of_ports = BIT(0);
+       drm_bus_flags_from_videomode(&ddata->vm, &dssdev->bus_flags);
 
        omapdss_display_init(dssdev);
        omapdss_device_register(dssdev);
index 0a485c5b982eb84addaf013dd8241509ca403d04..00a9c2ab9e6c8932baecc9b5591b07626201c589 100644 (file)
@@ -5418,9 +5418,15 @@ static int dsi_probe(struct platform_device *pdev)
                dsi->num_lanes_supported = 3;
        }
 
+       r = of_platform_populate(dev->of_node, NULL, NULL, dev);
+       if (r) {
+               DSSERR("Failed to populate DSI child devices: %d\n", r);
+               goto err_pm_disable;
+       }
+
        r = dsi_init_output(dsi);
        if (r)
-               goto err_pm_disable;
+               goto err_of_depopulate;
 
        r = dsi_probe_of(dsi);
        if (r) {
@@ -5428,22 +5434,16 @@ static int dsi_probe(struct platform_device *pdev)
                goto err_uninit_output;
        }
 
-       r = of_platform_populate(dev->of_node, NULL, NULL, dev);
-       if (r) {
-               DSSERR("Failed to populate DSI child devices: %d\n", r);
-               goto err_uninit_output;
-       }
-
        r = component_add(&pdev->dev, &dsi_component_ops);
        if (r)
-               goto err_of_depopulate;
+               goto err_uninit_output;
 
        return 0;
 
-err_of_depopulate:
-       of_platform_depopulate(dev);
 err_uninit_output:
        dsi_uninit_output(dsi);
+err_of_depopulate:
+       of_platform_depopulate(dev);
 err_pm_disable:
        pm_runtime_disable(dev);
        return r;
index 1f698a95a94a57d4a03626666c7f755f4fce6fae..33e15cb77efa79afbcc3d46c17eb045b4a5d3c57 100644 (file)
@@ -432,7 +432,7 @@ struct omap_dss_device {
        const struct omap_dss_driver *driver;
        const struct omap_dss_device_ops *ops;
        unsigned long ops_flags;
-       unsigned long bus_flags;
+       u32 bus_flags;
 
        /* helper variable for driver suspend/resume */
        bool activate_after_resume;
index 452e625f6ce331a24a13afe8c40209cab5e0b0d1..933ebc9f9faaaf35049a53aef49551e3ff1e740a 100644 (file)
@@ -52,17 +52,44 @@ static const struct drm_encoder_funcs omap_encoder_funcs = {
        .destroy = omap_encoder_destroy,
 };
 
+static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder,
+                                      struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       struct omap_dss_device *dssdev = omap_encoder->output;
+       struct drm_connector *connector;
+       bool hdmi_mode;
+
+       hdmi_mode = false;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (connector->encoder == encoder) {
+                       hdmi_mode = omap_connector_get_hdmi_mode(connector);
+                       break;
+               }
+       }
+
+       if (dssdev->ops->hdmi.set_hdmi_mode)
+               dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
+
+       if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
+               struct hdmi_avi_infoframe avi;
+               int r;
+
+               r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
+                                                            false);
+               if (r == 0)
+                       dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
+       }
+}
+
 static void omap_encoder_mode_set(struct drm_encoder *encoder,
                                  struct drm_display_mode *mode,
                                  struct drm_display_mode *adjusted_mode)
 {
-       struct drm_device *dev = encoder->dev;
        struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       struct drm_connector *connector;
        struct omap_dss_device *dssdev;
        struct videomode vm = { 0 };
-       bool hdmi_mode;
-       int r;
 
        drm_display_mode_to_videomode(adjusted_mode, &vm);
 
@@ -112,27 +139,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
        }
 
        /* Set the HDMI mode and HDMI infoframe if applicable. */
-       hdmi_mode = false;
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       hdmi_mode = omap_connector_get_hdmi_mode(connector);
-                       break;
-               }
-       }
-
-       dssdev = omap_encoder->output;
-
-       if (dssdev->ops->hdmi.set_hdmi_mode)
-               dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
-
-       if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
-               struct hdmi_avi_infoframe avi;
-
-               r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
-                                                            false);
-               if (r == 0)
-                       dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
-       }
+       if (omap_encoder->output->output_type == OMAP_DISPLAY_TYPE_HDMI)
+               omap_encoder_hdmi_mode_set(encoder, adjusted_mode);
 }
 
 static void omap_encoder_disable(struct drm_encoder *encoder)
index ba80150d10524802271be2ce89f9144c0a98bee2..895d77d799e4fd9aad3f8715ae4c10f1fe5a7c75 100644 (file)
@@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        if (!fbo)
                return -ENOMEM;
 
-       ttm_bo_get(bo);
        fbo->base = *bo;
+       fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
+
+       ttm_bo_get(bo);
        fbo->bo = bo;
 
        /**
index b372854cf38d3221d8598affb395845fb42ce930..704049e62d58ac9a9cd1e7c8bb4a3e1c222eb42d 100644 (file)
@@ -309,7 +309,7 @@ static void mousevsc_on_receive(struct hv_device *device,
                hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
                                 input_dev->input_buf, len, 1);
 
-               pm_wakeup_event(&input_dev->device->device, 0);
+               pm_wakeup_hard_event(&input_dev->device->device);
 
                break;
        default:
index 6277597d3d5818145c93eb561ff3f5f1017a0202..edd34c167a9bd44e70c34e24e506798562dc28a4 100644 (file)
@@ -435,61 +435,16 @@ void vmbus_free_channels(void)
        }
 }
 
-/*
- * vmbus_process_offer - Process the offer by creating a channel/device
- * associated with this offer
- */
-static void vmbus_process_offer(struct vmbus_channel *newchannel)
+/* Note: the function can run concurrently for primary/sub channels. */
+static void vmbus_add_channel_work(struct work_struct *work)
 {
-       struct vmbus_channel *channel;
-       bool fnew = true;
+       struct vmbus_channel *newchannel =
+               container_of(work, struct vmbus_channel, add_channel_work);
+       struct vmbus_channel *primary_channel = newchannel->primary_channel;
        unsigned long flags;
        u16 dev_type;
        int ret;
 
-       /* Make sure this is a new offer */
-       mutex_lock(&vmbus_connection.channel_mutex);
-
-       /*
-        * Now that we have acquired the channel_mutex,
-        * we can release the potentially racing rescind thread.
-        */
-       atomic_dec(&vmbus_connection.offer_in_progress);
-
-       list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
-               if (!uuid_le_cmp(channel->offermsg.offer.if_type,
-                       newchannel->offermsg.offer.if_type) &&
-                       !uuid_le_cmp(channel->offermsg.offer.if_instance,
-                               newchannel->offermsg.offer.if_instance)) {
-                       fnew = false;
-                       break;
-               }
-       }
-
-       if (fnew)
-               list_add_tail(&newchannel->listentry,
-                             &vmbus_connection.chn_list);
-
-       mutex_unlock(&vmbus_connection.channel_mutex);
-
-       if (!fnew) {
-               /*
-                * Check to see if this is a sub-channel.
-                */
-               if (newchannel->offermsg.offer.sub_channel_index != 0) {
-                       /*
-                        * Process the sub-channel.
-                        */
-                       newchannel->primary_channel = channel;
-                       spin_lock_irqsave(&channel->lock, flags);
-                       list_add_tail(&newchannel->sc_list, &channel->sc_list);
-                       channel->num_sc++;
-                       spin_unlock_irqrestore(&channel->lock, flags);
-               } else {
-                       goto err_free_chan;
-               }
-       }
-
        dev_type = hv_get_dev_type(newchannel);
 
        init_vp_index(newchannel, dev_type);
@@ -507,27 +462,26 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
        /*
         * This state is used to indicate a successful open
         * so that when we do close the channel normally, we
-        * can cleanup properly
+        * can cleanup properly.
         */
        newchannel->state = CHANNEL_OPEN_STATE;
 
-       if (!fnew) {
-               struct hv_device *dev
-                       = newchannel->primary_channel->device_obj;
+       if (primary_channel != NULL) {
+               /* newchannel is a sub-channel. */
+               struct hv_device *dev = primary_channel->device_obj;
 
                if (vmbus_add_channel_kobj(dev, newchannel))
-                       goto err_free_chan;
+                       goto err_deq_chan;
+
+               if (primary_channel->sc_creation_callback != NULL)
+                       primary_channel->sc_creation_callback(newchannel);
 
-               if (channel->sc_creation_callback != NULL)
-                       channel->sc_creation_callback(newchannel);
                newchannel->probe_done = true;
                return;
        }
 
        /*
-        * Start the process of binding this offer to the driver
-        * We need to set the DeviceObject field before calling
-        * vmbus_child_dev_add()
+        * Start the process of binding the primary channel to the driver
         */
        newchannel->device_obj = vmbus_device_create(
                &newchannel->offermsg.offer.if_type,
@@ -556,13 +510,28 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 
 err_deq_chan:
        mutex_lock(&vmbus_connection.channel_mutex);
-       list_del(&newchannel->listentry);
+
+       /*
+        * We need to set the flag, otherwise
+        * vmbus_onoffer_rescind() can be blocked.
+        */
+       newchannel->probe_done = true;
+
+       if (primary_channel == NULL) {
+               list_del(&newchannel->listentry);
+       } else {
+               spin_lock_irqsave(&primary_channel->lock, flags);
+               list_del(&newchannel->sc_list);
+               spin_unlock_irqrestore(&primary_channel->lock, flags);
+       }
+
        mutex_unlock(&vmbus_connection.channel_mutex);
 
        if (newchannel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(newchannel->target_cpu,
-                                        percpu_channel_deq, newchannel, true);
+                                        percpu_channel_deq,
+                                        newchannel, true);
        } else {
                percpu_channel_deq(newchannel);
                put_cpu();
@@ -570,14 +539,104 @@ err_deq_chan:
 
        vmbus_release_relid(newchannel->offermsg.child_relid);
 
-err_free_chan:
        free_channel(newchannel);
 }
 
+/*
+ * vmbus_process_offer - Process the offer by creating a channel/device
+ * associated with this offer
+ */
+static void vmbus_process_offer(struct vmbus_channel *newchannel)
+{
+       struct vmbus_channel *channel;
+       struct workqueue_struct *wq;
+       unsigned long flags;
+       bool fnew = true;
+
+       mutex_lock(&vmbus_connection.channel_mutex);
+
+       /*
+        * Now that we have acquired the channel_mutex,
+        * we can release the potentially racing rescind thread.
+        */
+       atomic_dec(&vmbus_connection.offer_in_progress);
+
+       list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+               if (!uuid_le_cmp(channel->offermsg.offer.if_type,
+                                newchannel->offermsg.offer.if_type) &&
+                   !uuid_le_cmp(channel->offermsg.offer.if_instance,
+                                newchannel->offermsg.offer.if_instance)) {
+                       fnew = false;
+                       break;
+               }
+       }
+
+       if (fnew)
+               list_add_tail(&newchannel->listentry,
+                             &vmbus_connection.chn_list);
+       else {
+               /*
+                * Check to see if this is a valid sub-channel.
+                */
+               if (newchannel->offermsg.offer.sub_channel_index == 0) {
+                       mutex_unlock(&vmbus_connection.channel_mutex);
+                       /*
+                        * Don't call free_channel(), because newchannel->kobj
+                        * is not initialized yet.
+                        */
+                       kfree(newchannel);
+                       WARN_ON_ONCE(1);
+                       return;
+               }
+               /*
+                * Process the sub-channel.
+                */
+               newchannel->primary_channel = channel;
+               spin_lock_irqsave(&channel->lock, flags);
+               list_add_tail(&newchannel->sc_list, &channel->sc_list);
+               spin_unlock_irqrestore(&channel->lock, flags);
+       }
+
+       mutex_unlock(&vmbus_connection.channel_mutex);
+
+       /*
+        * vmbus_process_offer() mustn't call channel->sc_creation_callback()
+        * directly for sub-channels, because sc_creation_callback() ->
+        * vmbus_open() may never get the host's response to the
+        * OPEN_CHANNEL message (the host may rescind a channel at any time,
+        * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
+        * may not wake up the vmbus_open() as it's blocked due to a non-zero
+        * vmbus_connection.offer_in_progress, and finally we have a deadlock.
+        *
+        * The above is also true for primary channels, if the related device
+        * drivers use sync probing mode by default.
+        *
+        * And, usually the handling of primary channels and sub-channels can
+        * depend on each other, so we should offload them to different
+        * workqueues to avoid possible deadlock, e.g. in sync-probing mode,
+        * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
+        * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock
+        * and waits for all the sub-channels to appear, but the latter
+        * can't get the rtnl_lock and this blocks the handling of
+        * sub-channels.
+        */
+       INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
+       wq = fnew ? vmbus_connection.handle_primary_chan_wq :
+                   vmbus_connection.handle_sub_chan_wq;
+       queue_work(wq, &newchannel->add_channel_work);
+}
+
 /*
  * We use this state to statically distribute the channel interrupt load.
  */
 static int next_numa_node_id;
+/*
+ * init_vp_index() accesses global variables like next_numa_node_id, and
+ * it can run concurrently for primary channels and sub-channels: see
+ * vmbus_process_offer(), so we need the lock to protect the global
+ * variables.
+ */
+static DEFINE_SPINLOCK(bind_channel_to_cpu_lock);
 
 /*
  * Starting with Win8, we can statically distribute the incoming
@@ -613,6 +672,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
                return;
        }
 
+       spin_lock(&bind_channel_to_cpu_lock);
+
        /*
         * Based on the channel affinity policy, we will assign the NUMA
         * nodes.
@@ -695,6 +756,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
        channel->target_cpu = cur_cpu;
        channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
 
+       spin_unlock(&bind_channel_to_cpu_lock);
+
        free_cpumask_var(available_mask);
 }
 
index f4d08c8ac7f8ff8f101cbe477826a0924b63170d..4fe117b761ce03a3d6351b86270d08853131d355 100644 (file)
@@ -190,6 +190,20 @@ int vmbus_connect(void)
                goto cleanup;
        }
 
+       vmbus_connection.handle_primary_chan_wq =
+               create_workqueue("hv_pri_chan");
+       if (!vmbus_connection.handle_primary_chan_wq) {
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
+       vmbus_connection.handle_sub_chan_wq =
+               create_workqueue("hv_sub_chan");
+       if (!vmbus_connection.handle_sub_chan_wq) {
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
        INIT_LIST_HEAD(&vmbus_connection.chn_msg_list);
        spin_lock_init(&vmbus_connection.channelmsg_lock);
 
@@ -280,10 +294,14 @@ void vmbus_disconnect(void)
         */
        vmbus_initiate_unload(false);
 
-       if (vmbus_connection.work_queue) {
-               drain_workqueue(vmbus_connection.work_queue);
+       if (vmbus_connection.handle_sub_chan_wq)
+               destroy_workqueue(vmbus_connection.handle_sub_chan_wq);
+
+       if (vmbus_connection.handle_primary_chan_wq)
+               destroy_workqueue(vmbus_connection.handle_primary_chan_wq);
+
+       if (vmbus_connection.work_queue)
                destroy_workqueue(vmbus_connection.work_queue);
-       }
 
        if (vmbus_connection.int_page) {
                free_pages((unsigned long)vmbus_connection.int_page, 0);
index 72eaba3d50fc26da141993c5f1eadb9916d1f94d..87d3d7da78f876198e0a160f3c39b60044fbcad4 100644 (file)
@@ -335,7 +335,14 @@ struct vmbus_connection {
        struct list_head chn_list;
        struct mutex channel_mutex;
 
+       /*
+        * An offer message is handled first on the work_queue, and then
+        * is further handled on handle_primary_chan_wq or
+        * handle_sub_chan_wq.
+        */
        struct workqueue_struct *work_queue;
+       struct workqueue_struct *handle_primary_chan_wq;
+       struct workqueue_struct *handle_sub_chan_wq;
 };
 
 
index 8e60048a33f8f88b5e10cf48d0cfc3a84f781424..51d34959709bade4c9baed0f14770ec4cb9719ea 100644 (file)
@@ -74,8 +74,7 @@
                                 MST_STATUS_ND)
 #define   MST_STATUS_ERR       (MST_STATUS_NAK | \
                                 MST_STATUS_AL  | \
-                                MST_STATUS_IP  | \
-                                MST_STATUS_TSS)
+                                MST_STATUS_IP)
 #define MST_TX_BYTES_XFRD      0x50
 #define MST_RX_BYTES_XFRD      0x54
 #define SCL_HIGH_PERIOD                0x80
@@ -241,7 +240,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev)
                         */
                        if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) {
                                idev->msg_err = -EPROTO;
-                               i2c_int_disable(idev, ~0);
+                               i2c_int_disable(idev, ~MST_STATUS_TSS);
                                complete(&idev->msg_complete);
                                break;
                        }
@@ -299,14 +298,19 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
 
        if (status & MST_STATUS_SCC) {
                /* Stop completed */
-               i2c_int_disable(idev, ~0);
+               i2c_int_disable(idev, ~MST_STATUS_TSS);
                complete(&idev->msg_complete);
        } else if (status & MST_STATUS_SNS) {
                /* Transfer done */
-               i2c_int_disable(idev, ~0);
+               i2c_int_disable(idev, ~MST_STATUS_TSS);
                if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
                        axxia_i2c_empty_rx_fifo(idev);
                complete(&idev->msg_complete);
+       } else if (status & MST_STATUS_TSS) {
+               /* Transfer timeout */
+               idev->msg_err = -ETIMEDOUT;
+               i2c_int_disable(idev, ~MST_STATUS_TSS);
+               complete(&idev->msg_complete);
        } else if (unlikely(status & MST_STATUS_ERR)) {
                /* Transfer error */
                i2c_int_disable(idev, ~0);
@@ -339,10 +343,10 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
        u32 rx_xfer, tx_xfer;
        u32 addr_1, addr_2;
        unsigned long time_left;
+       unsigned int wt_value;
 
        idev->msg = msg;
        idev->msg_xfrd = 0;
-       idev->msg_err = 0;
        reinit_completion(&idev->msg_complete);
 
        if (i2c_m_ten(msg)) {
@@ -383,9 +387,18 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
        else if (axxia_i2c_fill_tx_fifo(idev) != 0)
                int_mask |= MST_STATUS_TFL;
 
+       wt_value = WT_VALUE(readl(idev->base + WAIT_TIMER_CONTROL));
+       /* Disable wait timer temporarly */
+       writel(wt_value, idev->base + WAIT_TIMER_CONTROL);
+       /* Check if timeout error happened */
+       if (idev->msg_err)
+               goto out;
+
        /* Start manual mode */
        writel(CMD_MANUAL, idev->base + MST_COMMAND);
 
+       writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL);
+
        i2c_int_enable(idev, int_mask);
 
        time_left = wait_for_completion_timeout(&idev->msg_complete,
@@ -396,13 +409,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
        if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
                dev_warn(idev->dev, "busy after xfer\n");
 
-       if (time_left == 0)
+       if (time_left == 0) {
                idev->msg_err = -ETIMEDOUT;
-
-       if (idev->msg_err == -ETIMEDOUT)
                i2c_recover_bus(&idev->adapter);
+               axxia_i2c_init(idev);
+       }
 
-       if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO)
+out:
+       if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO &&
+                       idev->msg_err != -ETIMEDOUT)
                axxia_i2c_init(idev);
 
        return idev->msg_err;
@@ -410,7 +425,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
 
 static int axxia_i2c_stop(struct axxia_i2c_dev *idev)
 {
-       u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC;
+       u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS;
        unsigned long time_left;
 
        reinit_completion(&idev->msg_complete);
@@ -437,6 +452,9 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        int i;
        int ret = 0;
 
+       idev->msg_err = 0;
+       i2c_int_enable(idev, MST_STATUS_TSS);
+
        for (i = 0; ret == 0 && i < num; ++i)
                ret = axxia_i2c_xfer_msg(idev, &msgs[i]);
 
index 8822357bca0c3d0db51e4a5d7ba1cf751374e505..e99c3bb5835137c8ad02cda35b6475eacc363355 100644 (file)
@@ -89,7 +89,7 @@ static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
 
        if (time_is_before_jiffies(target)) {
                dev_err(i2cd->dev, "i2c timeout error %x\n", val);
-               return -ETIME;
+               return -ETIMEDOUT;
        }
 
        val = readl(i2cd->regs + I2C_MST_CNTL);
@@ -97,9 +97,9 @@ static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
        case I2C_MST_CNTL_STATUS_OKAY:
                return 0;
        case I2C_MST_CNTL_STATUS_NO_ACK:
-               return -EIO;
+               return -ENXIO;
        case I2C_MST_CNTL_STATUS_TIMEOUT:
-               return -ETIME;
+               return -ETIMEDOUT;
        default:
                return 0;
        }
@@ -218,6 +218,7 @@ stop:
 
 static const struct i2c_adapter_quirks gpu_i2c_quirks = {
        .max_read_len = 4,
+       .max_comb_2nd_msg_len = 4,
        .flags = I2C_AQ_COMB_WRITE_THEN_READ,
 };
 
index 4aa7dde876f3f23dd38e2799270b1340aca3af2c..254e6219e5389f17114185c57470914562c2bed6 100644 (file)
@@ -779,6 +779,11 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
        pm_runtime_get_sync(dev);
 
+       /* Check bus state before init otherwise bus busy info will be lost */
+       ret = rcar_i2c_bus_barrier(priv);
+       if (ret < 0)
+               goto out;
+
        /* Gen3 needs a reset before allowing RXDMA once */
        if (priv->devtype == I2C_RCAR_GEN3) {
                priv->flags |= ID_P_NO_RXDMA;
@@ -791,10 +796,6 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
        rcar_i2c_init(priv);
 
-       ret = rcar_i2c_bus_barrier(priv);
-       if (ret < 0)
-               goto out;
-
        for (i = 0; i < num; i++)
                rcar_i2c_request_dma(priv, msgs + i);
 
index 7e9a2bbf5ddcb967459367778a834c8314ff6f2b..ff3f4553648f3c29a8c576172fc4c342cef6a94b 100644 (file)
@@ -367,6 +367,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
 {
        struct acpi_smbus_cmi *smbus_cmi;
        const struct acpi_device_id *id;
+       int ret;
 
        smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
        if (!smbus_cmi)
@@ -388,8 +389,10 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
        acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
                            acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
 
-       if (smbus_cmi->cap_info == 0)
+       if (smbus_cmi->cap_info == 0) {
+               ret = -ENODEV;
                goto err;
+       }
 
        snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
                "SMBus CMI adapter %s",
@@ -400,7 +403,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
        smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
        smbus_cmi->adapter.dev.parent = &device->dev;
 
-       if (i2c_add_adapter(&smbus_cmi->adapter)) {
+       ret = i2c_add_adapter(&smbus_cmi->adapter);
+       if (ret) {
                dev_err(&device->dev, "Couldn't register adapter!\n");
                goto err;
        }
@@ -410,7 +414,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
 err:
        kfree(smbus_cmi);
        device->driver_data = NULL;
-       return -EIO;
+       return ret;
 }
 
 static int acpi_smbus_cmi_remove(struct acpi_device *device)
index dd384743dbbd0581ffc2b3c7f179e158bebdf10b..03da4a539a2f2ab9289b7d1ce589d8fa75f3511b 100644 (file)
@@ -173,8 +173,6 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
                "interrupt: enabled_irqs=%04x, irq_status=%04x\n",
                priv->enabled_irqs, irq_status);
 
-       uniphier_fi2c_clear_irqs(priv, irq_status);
-
        if (irq_status & UNIPHIER_FI2C_INT_STOP)
                goto complete;
 
@@ -214,7 +212,13 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
 
        if (irq_status & (UNIPHIER_FI2C_INT_RF | UNIPHIER_FI2C_INT_RB)) {
                uniphier_fi2c_drain_rxfifo(priv);
-               if (!priv->len)
+               /*
+                * If the number of bytes to read is multiple of the FIFO size
+                * (msg->len == 8, 16, 24, ...), the INT_RF bit is set a little
+                * earlier than INT_RB. We wait for INT_RB to confirm the
+                * completion of the current message.
+                */
+               if (!priv->len && (irq_status & UNIPHIER_FI2C_INT_RB))
                        goto data_done;
 
                if (unlikely(priv->flags & UNIPHIER_FI2C_MANUAL_NACK)) {
@@ -253,12 +257,20 @@ complete:
        }
 
 handled:
+       /*
+        * This controller makes a pause while any bit of the IRQ status is
+        * asserted. Clear the asserted bit to kick the controller just before
+        * exiting the handler.
+        */
+       uniphier_fi2c_clear_irqs(priv, irq_status);
+
        spin_unlock(&priv->lock);
 
        return IRQ_HANDLED;
 }
 
-static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr)
+static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr,
+                                 bool repeat)
 {
        priv->enabled_irqs |= UNIPHIER_FI2C_INT_TE;
        uniphier_fi2c_set_irqs(priv);
@@ -268,8 +280,12 @@ static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr)
        /* set slave address */
        writel(UNIPHIER_FI2C_DTTX_CMD | addr << 1,
               priv->membase + UNIPHIER_FI2C_DTTX);
-       /* first chunk of data */
-       uniphier_fi2c_fill_txfifo(priv, true);
+       /*
+        * First chunk of data. For a repeated START condition, do not write
+        * data to the TX fifo here to avoid the timing issue.
+        */
+       if (!repeat)
+               uniphier_fi2c_fill_txfifo(priv, true);
 }
 
 static void uniphier_fi2c_rx_init(struct uniphier_fi2c_priv *priv, u16 addr)
@@ -350,7 +366,7 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
        if (is_read)
                uniphier_fi2c_rx_init(priv, msg->addr);
        else
-               uniphier_fi2c_tx_init(priv, msg->addr);
+               uniphier_fi2c_tx_init(priv, msg->addr, repeat);
 
        dev_dbg(&adap->dev, "start condition\n");
        /*
@@ -502,9 +518,26 @@ static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv)
 
        uniphier_fi2c_reset(priv);
 
+       /*
+        *  Standard-mode: tLOW + tHIGH = 10 us
+        *  Fast-mode:     tLOW + tHIGH = 2.5 us
+        */
        writel(cyc, priv->membase + UNIPHIER_FI2C_CYC);
-       writel(cyc / 2, priv->membase + UNIPHIER_FI2C_LCTL);
+       /*
+        *  Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us, tBUF = 4.7 us
+        *  Fast-mode:     tLOW = 1.3 us, tHIGH = 0.6 us, tBUF = 1.3 us
+        * "tLow/tHIGH = 5/4" meets both.
+        */
+       writel(cyc * 5 / 9, priv->membase + UNIPHIER_FI2C_LCTL);
+       /*
+        *  Standard-mode: tHD;STA = 4.0 us, tSU;STA = 4.7 us, tSU;STO = 4.0 us
+        *  Fast-mode:     tHD;STA = 0.6 us, tSU;STA = 0.6 us, tSU;STO = 0.6 us
+        */
        writel(cyc / 2, priv->membase + UNIPHIER_FI2C_SSUT);
+       /*
+        *  Standard-mode: tSU;DAT = 250 ns
+        *  Fast-mode:     tSU;DAT = 100 ns
+        */
        writel(cyc / 16, priv->membase + UNIPHIER_FI2C_DSUT);
 
        uniphier_fi2c_prepare_operation(priv);
index 454f914ae66dbd49931575122bb7c7dea662b11b..c488e558aef709ee5097f05436624807df26ac4e 100644 (file)
@@ -320,7 +320,13 @@ static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv)
 
        uniphier_i2c_reset(priv, true);
 
-       writel((cyc / 2 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK);
+       /*
+        * Bit30-16: clock cycles of tLOW.
+        *  Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us
+        *  Fast-mode:     tLOW = 1.3 us, tHIGH = 0.6 us
+        * "tLow/tHIGH = 5/4" meets both.
+        */
+       writel((cyc * 5 / 9 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK);
 
        uniphier_i2c_reset(priv, false);
 }
index 45c9974303328b29af5a32d2ed5b47155aaa8e32..4c8c7a620d08dae851de513eebe2710dee3ca89e 100644 (file)
@@ -614,18 +614,7 @@ static int ide_drivers_show(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ide_drivers_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, &ide_drivers_show, NULL);
-}
-
-static const struct file_operations ide_drivers_operations = {
-       .owner          = THIS_MODULE,
-       .open           = ide_drivers_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ide_drivers);
 
 void proc_ide_create(void)
 {
@@ -634,7 +623,7 @@ void proc_ide_create(void)
        if (!proc_ide_root)
                return;
 
-       proc_create("drivers", 0, proc_ide_root, &ide_drivers_operations);
+       proc_create("drivers", 0, proc_ide_root, &ide_drivers_fops);
 }
 
 void proc_ide_destroy(void)
index c5b902b86b444773519edc1a52fe4d8782992f37..203ed4adc04ae6680de39910985b05996c227528 100644 (file)
@@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
        struct device_node *root = of_find_node_by_path("/");
        const char *model = of_get_property(root, "model", NULL);
 
+       of_node_put(root);
        /* Get cable type from device-tree. */
        if (cable && !strncmp(cable, "80-", 3)) {
                /* Some drives fail to detect 80c cable in PowerBook */
index d4b9db487b16fa3f9a87e4f5fd6732a8b4d9c9b4..cfc8b94527b97cda3f4b20782af0fbc2b6260f2b 100644 (file)
@@ -480,18 +480,18 @@ static const u8 xboxone_hori_init[] = {
 };
 
 /*
- * This packet is required for some of the PDP pads to start
+ * This packet is required for most (all?) of the PDP pads to start
  * sending input reports. These pads include: (0x0e6f:0x02ab),
- * (0x0e6f:0x02a4).
+ * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
  */
 static const u8 xboxone_pdp_init1[] = {
        0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
 };
 
 /*
- * This packet is required for some of the PDP pads to start
+ * This packet is required for most (all?) of the PDP pads to start
  * sending input reports. These pads include: (0x0e6f:0x02ab),
- * (0x0e6f:0x02a4).
+ * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
  */
 static const u8 xboxone_pdp_init2[] = {
        0x06, 0x20, 0x00, 0x02, 0x01, 0x00
@@ -527,12 +527,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
        XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
        XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
        XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
-       XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
-       XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
-       XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
-       XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
-       XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
-       XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
+       XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
+       XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
        XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
        XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
        XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
index 7e75835e220f29f2140f26d50ab0403e774f16dd..850bb259c20ebbba2c68f7dd88c6cfb83c9192a1 100644 (file)
@@ -841,7 +841,7 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
        if (param[0] != 3) {
                param[0] = 2;
                if (ps2_command(ps2dev, param, ATKBD_CMD_SSCANSET))
-               return 2;
+                       return 2;
        }
 
        ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MBR);
index 81be6f781f0b60207088114e0b72b6e0e12d1b14..d5600118159835321c3c55b1d10e1cf4468cc22e 100644 (file)
@@ -493,7 +493,8 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev)
        for (i = 0; i < ARRAY_SIZE(cros_ec_keyb_bs); i++) {
                const struct cros_ec_bs_map *map = &cros_ec_keyb_bs[i];
 
-               if (buttons & BIT(map->bit))
+               if ((map->ev_type == EV_KEY && (buttons & BIT(map->bit))) ||
+                   (map->ev_type == EV_SW && (switches & BIT(map->bit))))
                        input_set_capability(idev, map->ev_type, map->code);
        }
 
index f51ae09596ef25942ff6bab030be9697c3a0eca7..403452ef00e6f257d67ca44bdf5626b0e5cc53a4 100644 (file)
@@ -407,7 +407,7 @@ matrix_keypad_parse_dt(struct device *dev)
        struct matrix_keypad_platform_data *pdata;
        struct device_node *np = dev->of_node;
        unsigned int *gpios;
-       int i, nrow, ncol;
+       int ret, i, nrow, ncol;
 
        if (!np) {
                dev_err(dev, "device lacks DT data\n");
@@ -452,12 +452,19 @@ matrix_keypad_parse_dt(struct device *dev)
                return ERR_PTR(-ENOMEM);
        }
 
-       for (i = 0; i < pdata->num_row_gpios; i++)
-               gpios[i] = of_get_named_gpio(np, "row-gpios", i);
+       for (i = 0; i < nrow; i++) {
+               ret = of_get_named_gpio(np, "row-gpios", i);
+               if (ret < 0)
+                       return ERR_PTR(ret);
+               gpios[i] = ret;
+       }
 
-       for (i = 0; i < pdata->num_col_gpios; i++)
-               gpios[pdata->num_row_gpios + i] =
-                       of_get_named_gpio(np, "col-gpios", i);
+       for (i = 0; i < ncol; i++) {
+               ret = of_get_named_gpio(np, "col-gpios", i);
+               if (ret < 0)
+                       return ERR_PTR(ret);
+               gpios[nrow + i] = ret;
+       }
 
        pdata->row_gpios = gpios;
        pdata->col_gpios = &gpios[pdata->num_row_gpios];
@@ -484,10 +491,8 @@ static int matrix_keypad_probe(struct platform_device *pdev)
        pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                pdata = matrix_keypad_parse_dt(&pdev->dev);
-               if (IS_ERR(pdata)) {
-                       dev_err(&pdev->dev, "no platform data defined\n");
+               if (IS_ERR(pdata))
                        return PTR_ERR(pdata);
-               }
        } else if (!pdata->keymap_data) {
                dev_err(&pdev->dev, "no keymap data defined\n");
                return -EINVAL;
index 46406345742b97c06595ab7e3b785ee2a6daca1b..a7dc286f406c992ebd55d764808691101dfff690 100644 (file)
 
 /* OMAP4 values */
 #define OMAP4_VAL_IRQDISABLE           0x0
-#define OMAP4_VAL_DEBOUNCINGTIME       0x7
-#define OMAP4_VAL_PVT                  0x7
+
+/*
+ * Errata i689: If a key is released for a time shorter than debounce time,
+ * the keyboard will idle and never detect the key release. The workaround
+ * is to use at least a 12ms debounce time. See omap5432 TRM chapter
+ * "26.4.6.2 Keyboard Controller Timer" for more information.
+ */
+#define OMAP4_KEYPAD_PTV_DIV_128        0x6
+#define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv)     \
+       ((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)
+#define OMAP4_VAL_DEBOUNCINGTIME_16MS                                  \
+       OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128)
 
 enum {
        KBD_REVISION_OMAP4 = 0,
@@ -181,9 +191,9 @@ static int omap4_keypad_open(struct input_dev *input)
 
        kbd_writel(keypad_data, OMAP4_KBD_CTRL,
                        OMAP4_DEF_CTRL_NOSOFTMODE |
-                       (OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT));
+                       (OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT));
        kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME,
-                       OMAP4_VAL_DEBOUNCINGTIME);
+                       OMAP4_VAL_DEBOUNCINGTIME_16MS);
        /* clear pending interrupts */
        kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
                         kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
index b0f9d19b3410ae1867e1c134b30f8ccb8a1e5bd3..a94b6494e71a5b724b261ef31987c52a4c5523c6 100644 (file)
@@ -1348,6 +1348,9 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0618", 0 },
        { "ELAN061C", 0 },
        { "ELAN061D", 0 },
+       { "ELAN061E", 0 },
+       { "ELAN0620", 0 },
+       { "ELAN0621", 0 },
        { "ELAN0622", 0 },
        { "ELAN1000", 0 },
        { }
index 5e85f3cca867dc23feb5e5753c86addfb06ddaf9..2bd5bb11c8baec85bb9422dbfb0612e6bfed77f2 100644 (file)
@@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN0048", /* X1 Carbon 3 */
        "LEN0046", /* X250 */
        "LEN004a", /* W541 */
+       "LEN005b", /* P50 */
        "LEN0071", /* T480 */
        "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
        "LEN0073", /* X1 Carbon G5 (Elantech) */
@@ -177,6 +178,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN0096", /* X280 */
        "LEN0097", /* X280 -> ALPS trackpoint */
        "LEN200f", /* T450s */
+       "SYN3221", /* HP 15-ay000 */
        NULL
 };
 
index 47a0e81a2989c93389e2affd20414d17057cde79..a8b9be3e28db709ef8769e29da2684c3d1e3bcf9 100644 (file)
@@ -177,7 +177,7 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev,
                 * state because the Enter-UP can trigger a wakeup at once.
                 */
                if (!(info & IS_BREAK))
-                       pm_wakeup_event(&hv_dev->device, 0);
+                       pm_wakeup_hard_event(&hv_dev->device);
 
                break;
 
index 02fb119858197b34fc9fe4e9a50fc18bc3e98957..42d3fd7e04d7c1a7f29fb0689fe5b597ef070692 100644 (file)
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Touch Screen driver for Renesas MIGO-R Platform
  *
  * Copyright (c) 2008 Magnus Damm
  * Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>,
  *  Kenati Technologies Pvt Ltd.
- *
- * This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU  General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
index b71673911aac303bbc61e34c1375582c6e7dbddf..11ff32c6802506a8deedfe460010c9974a046de6 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * ST1232 Touchscreen Controller Driver
  *
@@ -7,15 +8,6 @@
  * Using code from:
  *  - android.git.kernel.org: projects/kernel/common.git: synaptics_i2c_rmi.c
  *     Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/delay.h>
@@ -295,4 +287,4 @@ module_i2c_driver(st1232_ts_driver);
 
 MODULE_AUTHOR("Tony SIM <chinyeow.sim.xt@renesas.com>");
 MODULE_DESCRIPTION("SITRONIX ST1232 Touchscreen Controller Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
index 6d4b2eec67b4fdde3fb78f7c8da0d2bb9531b22b..29836c1a40e987985937f89dc7574ca6205a489e 100644 (file)
@@ -80,8 +80,8 @@ struct dvb_pll_desc {
 
 static const struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
        .name  = "Thomson dtt7579",
-       .min   = 177000000,
-       .max   = 858000000,
+       .min   = 177 * MHz,
+       .max   = 858 * MHz,
        .iffreq= 36166667,
        .sleepdata = (u8[]){ 2, 0xb4, 0x03 },
        .count = 4,
@@ -102,8 +102,8 @@ static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf)
 
 static const struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
        .name  = "Thomson dtt759x",
-       .min   = 177000000,
-       .max   = 896000000,
+       .min   = 177 * MHz,
+       .max   = 896 * MHz,
        .set   = thomson_dtt759x_bw,
        .iffreq= 36166667,
        .sleepdata = (u8[]){ 2, 0x84, 0x03 },
@@ -126,8 +126,8 @@ static void thomson_dtt7520x_bw(struct dvb_frontend *fe, u8 *buf)
 
 static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
        .name  = "Thomson dtt7520x",
-       .min   = 185000000,
-       .max   = 900000000,
+       .min   = 185 * MHz,
+       .max   = 900 * MHz,
        .set   = thomson_dtt7520x_bw,
        .iffreq = 36166667,
        .count = 7,
@@ -144,8 +144,8 @@ static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
 
 static const struct dvb_pll_desc dvb_pll_lg_z201 = {
        .name  = "LG z201",
-       .min   = 174000000,
-       .max   = 862000000,
+       .min   = 174 * MHz,
+       .max   = 862 * MHz,
        .iffreq= 36166667,
        .sleepdata = (u8[]){ 2, 0xbc, 0x03 },
        .count = 5,
@@ -160,8 +160,8 @@ static const struct dvb_pll_desc dvb_pll_lg_z201 = {
 
 static const struct dvb_pll_desc dvb_pll_unknown_1 = {
        .name  = "unknown 1", /* used by dntv live dvb-t */
-       .min   = 174000000,
-       .max   = 862000000,
+       .min   = 174 * MHz,
+       .max   = 862 * MHz,
        .iffreq= 36166667,
        .count = 9,
        .entries = {
@@ -182,8 +182,8 @@ static const struct dvb_pll_desc dvb_pll_unknown_1 = {
  */
 static const struct dvb_pll_desc dvb_pll_tua6010xs = {
        .name  = "Infineon TUA6010XS",
-       .min   =  44250000,
-       .max   = 858000000,
+       .min   = 44250 * kHz,
+       .max   = 858 * MHz,
        .iffreq= 36125000,
        .count = 3,
        .entries = {
@@ -196,8 +196,8 @@ static const struct dvb_pll_desc dvb_pll_tua6010xs = {
 /* Panasonic env57h1xd5 (some Philips PLL ?) */
 static const struct dvb_pll_desc dvb_pll_env57h1xd5 = {
        .name  = "Panasonic ENV57H1XD5",
-       .min   =  44250000,
-       .max   = 858000000,
+       .min   = 44250 * kHz,
+       .max   = 858 * MHz,
        .iffreq= 36125000,
        .count = 4,
        .entries = {
@@ -220,8 +220,8 @@ static void tda665x_bw(struct dvb_frontend *fe, u8 *buf)
 
 static const struct dvb_pll_desc dvb_pll_tda665x = {
        .name  = "Philips TDA6650/TDA6651",
-       .min   =  44250000,
-       .max   = 858000000,
+       .min   = 44250 * kHz,
+       .max   = 858 * MHz,
        .set   = tda665x_bw,
        .iffreq= 36166667,
        .initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab },
@@ -254,8 +254,8 @@ static void tua6034_bw(struct dvb_frontend *fe, u8 *buf)
 
 static const struct dvb_pll_desc dvb_pll_tua6034 = {
        .name  = "Infineon TUA6034",
-       .min   =  44250000,
-       .max   = 858000000,
+       .min   = 44250 * kHz,
+       .max   = 858 * MHz,
        .iffreq= 36166667,
        .count = 3,
        .set   = tua6034_bw,
@@ -278,8 +278,8 @@ static void tded4_bw(struct dvb_frontend *fe, u8 *buf)
 
 static const struct dvb_pll_desc dvb_pll_tded4 = {
        .name = "ALPS TDED4",
-       .min = 47000000,
-       .max = 863000000,
+       .min =  47 * MHz,
+       .max = 863 * MHz,
        .iffreq= 36166667,
        .set   = tded4_bw,
        .count = 4,
@@ -296,8 +296,8 @@ static const struct dvb_pll_desc dvb_pll_tded4 = {
  */
 static const struct dvb_pll_desc dvb_pll_tdhu2 = {
        .name = "ALPS TDHU2",
-       .min = 54000000,
-       .max = 864000000,
+       .min =  54 * MHz,
+       .max = 864 * MHz,
        .iffreq= 44000000,
        .count = 4,
        .entries = {
@@ -313,8 +313,8 @@ static const struct dvb_pll_desc dvb_pll_tdhu2 = {
  */
 static const struct dvb_pll_desc dvb_pll_samsung_tbmv = {
        .name = "Samsung TBMV30111IN / TBMV30712IN1",
-       .min = 54000000,
-       .max = 860000000,
+       .min =  54 * MHz,
+       .max = 860 * MHz,
        .iffreq= 44000000,
        .count = 6,
        .entries = {
@@ -332,8 +332,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmv = {
  */
 static const struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
        .name  = "Philips SD1878",
-       .min   =  950000,
-       .max   = 2150000,
+       .min   =  950 * MHz,
+       .max   = 2150 * MHz,
        .iffreq= 249, /* zero-IF, offset 249 is to round up */
        .count = 4,
        .entries = {
@@ -398,8 +398,8 @@ static void opera1_bw(struct dvb_frontend *fe, u8 *buf)
 
 static const struct dvb_pll_desc dvb_pll_opera1 = {
        .name  = "Opera Tuner",
-       .min   =  900000,
-       .max   = 2250000,
+       .min   =  900 * MHz,
+       .max   = 2250 * MHz,
        .initdata = (u8[]){ 4, 0x08, 0xe5, 0xe1, 0x00 },
        .initdata2 = (u8[]){ 4, 0x08, 0xe5, 0xe5, 0x00 },
        .iffreq= 0,
@@ -445,8 +445,8 @@ static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf)
 /* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */
 static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
        .name   = "Samsung DTOS403IH102A",
-       .min    =  44250000,
-       .max    = 858000000,
+       .min    = 44250 * kHz,
+       .max    = 858 * MHz,
        .iffreq =  36125000,
        .count  = 8,
        .set    = samsung_dtos403ih102a_set,
@@ -465,8 +465,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
 /* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */
 static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
        .name   = "Samsung TDTC9251DH0",
-       .min    =  48000000,
-       .max    = 863000000,
+       .min    =  48 * MHz,
+       .max    = 863 * MHz,
        .iffreq =  36166667,
        .count  = 3,
        .entries = {
@@ -479,8 +479,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
 /* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */
 static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
        .name = "Samsung TBDU18132",
-       .min    =  950000,
-       .max    = 2150000, /* guesses */
+       .min    =  950 * MHz,
+       .max    = 2150 * MHz, /* guesses */
        .iffreq = 0,
        .count = 2,
        .entries = {
@@ -500,8 +500,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
 /* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */
 static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
        .name = "Samsung TBMU24112",
-       .min    =  950000,
-       .max    = 2150000, /* guesses */
+       .min    =  950 * MHz,
+       .max    = 2150 * MHz, /* guesses */
        .iffreq = 0,
        .count = 2,
        .entries = {
@@ -521,8 +521,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
  * 822 - 862   1  *  0   0   1   0   0   0   0x88 */
 static const struct dvb_pll_desc dvb_pll_alps_tdee4 = {
        .name = "ALPS TDEE4",
-       .min    =  47000000,
-       .max    = 862000000,
+       .min    =  47 * MHz,
+       .max    = 862 * MHz,
        .iffreq =  36125000,
        .count = 4,
        .entries = {
@@ -537,8 +537,8 @@ static const struct dvb_pll_desc dvb_pll_alps_tdee4 = {
 /* CP cur. 50uA, AGC takeover: 103dBuV, PORT3 on */
 static const struct dvb_pll_desc dvb_pll_tua6034_friio = {
        .name   = "Infineon TUA6034 ISDB-T (Friio)",
-       .min    =  90000000,
-       .max    = 770000000,
+       .min    =  90 * MHz,
+       .max    = 770 * MHz,
        .iffreq =  57000000,
        .initdata = (u8[]){ 4, 0x9a, 0x50, 0xb2, 0x08 },
        .sleepdata = (u8[]){ 4, 0x9a, 0x70, 0xb3, 0x0b },
@@ -553,8 +553,8 @@ static const struct dvb_pll_desc dvb_pll_tua6034_friio = {
 /* Philips TDA6651 ISDB-T, used in Earthsoft PT1 */
 static const struct dvb_pll_desc dvb_pll_tda665x_earth_pt1 = {
        .name   = "Philips TDA6651 ISDB-T (EarthSoft PT1)",
-       .min    =  90000000,
-       .max    = 770000000,
+       .min    =  90 * MHz,
+       .max    = 770 * MHz,
        .iffreq =  57000000,
        .initdata = (u8[]){ 5, 0x0e, 0x7f, 0xc1, 0x80, 0x80 },
        .count = 10,
@@ -610,9 +610,6 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
        u32 div;
        int i;
 
-       if (frequency && (frequency < desc->min || frequency > desc->max))
-               return -EINVAL;
-
        for (i = 0; i < desc->count; i++) {
                if (frequency > desc->entries[i].limit)
                        continue;
@@ -799,7 +796,6 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
        struct dvb_pll_priv *priv = NULL;
        int ret;
        const struct dvb_pll_desc *desc;
-       struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 
        b1 = kmalloc(1, GFP_KERNEL);
        if (!b1)
@@ -845,18 +841,12 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
 
        strncpy(fe->ops.tuner_ops.info.name, desc->name,
                sizeof(fe->ops.tuner_ops.info.name));
-       switch (c->delivery_system) {
-       case SYS_DVBS:
-       case SYS_DVBS2:
-       case SYS_TURBO:
-       case SYS_ISDBS:
-               fe->ops.tuner_ops.info.frequency_min_hz = desc->min * kHz;
-               fe->ops.tuner_ops.info.frequency_max_hz = desc->max * kHz;
-               break;
-       default:
-               fe->ops.tuner_ops.info.frequency_min_hz = desc->min;
-               fe->ops.tuner_ops.info.frequency_max_hz = desc->max;
-       }
+
+       fe->ops.tuner_ops.info.frequency_min_hz = desc->min;
+       fe->ops.tuner_ops.info.frequency_max_hz = desc->max;
+
+       dprintk("%s tuner, frequency range: %u...%u\n",
+               desc->name, desc->min, desc->max);
 
        if (!desc->initdata)
                fe->ops.tuner_ops.init = NULL;
index 4e9db1fed69711f731089472799ac47d85b4ab94..c71a34ae6383c65651bd08baa8de0e5e59e59e9c 100644 (file)
@@ -238,6 +238,9 @@ static const struct file_operations request_fops = {
        .owner = THIS_MODULE,
        .poll = media_request_poll,
        .unlocked_ioctl = media_request_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = media_request_ioctl,
+#endif /* CONFIG_COMPAT */
        .release = media_request_close,
 };
 
index b292cff26c8663636a3cc959688032c8d79e6b3d..013cdebecbc49b5a899a91fb8455383f2f6df506 100644 (file)
@@ -304,7 +304,8 @@ restart:
                for (; p < p_out + sz; p++) {
                        u32 copy;
 
-                       p = memchr(p, magic[ctx->comp_magic_cnt], sz);
+                       p = memchr(p, magic[ctx->comp_magic_cnt],
+                                  p_out + sz - p);
                        if (!p) {
                                ctx->comp_magic_cnt = 0;
                                break;
index fce9d6f4b7c924c6b95dea6c59f792ac701b7031..3137f5d89d8030448e4cad4b912a6dca2caa11df 100644 (file)
@@ -426,10 +426,10 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
 
        /* append the packet to the frame buffer */
        if (len > 0) {
-               if (gspca_dev->image_len + len > gspca_dev->pixfmt.sizeimage) {
+               if (gspca_dev->image_len + len > PAGE_ALIGN(gspca_dev->pixfmt.sizeimage)) {
                        gspca_err(gspca_dev, "frame overflow %d > %d\n",
                                  gspca_dev->image_len + len,
-                                 gspca_dev->pixfmt.sizeimage);
+                                 PAGE_ALIGN(gspca_dev->pixfmt.sizeimage));
                        packet_type = DISCARD_PACKET;
                } else {
 /* !! image is NULL only when last pkt is LAST or DISCARD
@@ -1297,18 +1297,19 @@ static int gspca_queue_setup(struct vb2_queue *vq,
                             unsigned int sizes[], struct device *alloc_devs[])
 {
        struct gspca_dev *gspca_dev = vb2_get_drv_priv(vq);
+       unsigned int size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage);
 
        if (*nplanes)
-               return sizes[0] < gspca_dev->pixfmt.sizeimage ? -EINVAL : 0;
+               return sizes[0] < size ? -EINVAL : 0;
        *nplanes = 1;
-       sizes[0] = gspca_dev->pixfmt.sizeimage;
+       sizes[0] = size;
        return 0;
 }
 
 static int gspca_buffer_prepare(struct vb2_buffer *vb)
 {
        struct gspca_dev *gspca_dev = vb2_get_drv_priv(vb->vb2_queue);
-       unsigned long size = gspca_dev->pixfmt.sizeimage;
+       unsigned long size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage);
 
        if (vb2_plane_size(vb, 0) < size) {
                gspca_err(gspca_dev, "buffer too small (%lu < %lu)\n",
index 8f9d6964173ec7852072022f597e45ac5ca5d88d..b99a194ce5a4a2926d8eda42e610c64ab34c5edb 100644 (file)
@@ -263,6 +263,11 @@ static const struct file_operations fops = {
 #endif
 };
 
+static void cros_ec_class_release(struct device *dev)
+{
+       kfree(to_cros_ec_dev(dev));
+}
+
 static void cros_ec_sensors_register(struct cros_ec_dev *ec)
 {
        /*
@@ -395,7 +400,7 @@ static int ec_device_probe(struct platform_device *pdev)
        int retval = -ENOMEM;
        struct device *dev = &pdev->dev;
        struct cros_ec_platform *ec_platform = dev_get_platdata(dev);
-       struct cros_ec_dev *ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL);
+       struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 
        if (!ec)
                return retval;
@@ -417,6 +422,7 @@ static int ec_device_probe(struct platform_device *pdev)
        ec->class_dev.devt = MKDEV(ec_major, pdev->id);
        ec->class_dev.class = &cros_class;
        ec->class_dev.parent = dev;
+       ec->class_dev.release = cros_ec_class_release;
 
        retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name);
        if (retval) {
index f43fb2f958a54e12c4d29ad91340e237e9d98e5a..93dfcef8afc4bc63a2e852b464e6482d3359e53c 100644 (file)
@@ -2086,6 +2086,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
                   aggregator->aggregator_identifier);
 
        /* Tell the partner that this port is not suitable for aggregation */
+       port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+       port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
+       port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
        port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
        __update_lacpdu_from_port(port);
        ad_lacpdu_send(port);
index 65f10fec25b397345b03e503526a3b40de514a3a..0b3e51f248c21a2477c9b1736b0f06b80e350e29 100644 (file)
@@ -116,8 +116,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
        /* Reset the switch. */
        REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
                  GLOBAL_ATU_CONTROL_SWRESET |
-                 GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
-                 GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
+                 GLOBAL_ATU_CONTROL_LEARNDIS);
 
        /* Wait up to one second for reset to complete. */
        timeout = jiffies + 1 * HZ;
@@ -142,13 +141,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
         */
        REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
 
-       /* Enable automatic address learning, set the address
-        * database size to 1024 entries, and set the default aging
-        * time to 5 minutes.
+       /* Disable automatic address learning.
         */
        REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
-                 GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
-                 GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
+                 GLOBAL_ATU_CONTROL_LEARNDIS);
 
        return 0;
 }
index f02592f43fe36f3af460672746ade204d1f574fb..a7e853fa43c24a07e719ef3a31b216df07fefafb 100644 (file)
@@ -674,7 +674,7 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 
                rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
 
-               is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
+               is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;
 
                pkt_type = 0xFFU & (rxd_wb->type >> 4);
 
index d4c30011752992ccf6d0f48d6f119e6950041e19..5d21c14853acc90cb07434fec4460ce004614db5 100644 (file)
@@ -5162,6 +5162,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
                cp = le16_to_cpu(resp->alloc_cmpl_rings);
                stats = le16_to_cpu(resp->alloc_stat_ctx);
                cp = min_t(u16, cp, stats);
+               hw_resc->resv_irqs = cp;
                if (bp->flags & BNXT_FLAG_CHIP_P5) {
                        int rx = hw_resc->resv_rx_rings;
                        int tx = hw_resc->resv_tx_rings;
@@ -5175,7 +5176,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
                                hw_resc->resv_rx_rings = rx;
                                hw_resc->resv_tx_rings = tx;
                        }
-                       cp = le16_to_cpu(resp->alloc_msix);
+                       hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
                        hw_resc->resv_hw_ring_grps = rx;
                }
                hw_resc->resv_cp_rings = cp;
@@ -5353,7 +5354,7 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
                return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
 }
 
-static int bnxt_cp_rings_in_use(struct bnxt *bp)
+static int bnxt_nq_rings_in_use(struct bnxt *bp)
 {
        int cp = bp->cp_nr_rings;
        int ulp_msix, ulp_base;
@@ -5368,10 +5369,22 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
        return cp;
 }
 
+static int bnxt_cp_rings_in_use(struct bnxt *bp)
+{
+       int cp;
+
+       if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+               return bnxt_nq_rings_in_use(bp);
+
+       cp = bp->tx_nr_rings + bp->rx_nr_rings;
+       return cp;
+}
+
 static bool bnxt_need_reserve_rings(struct bnxt *bp)
 {
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        int cp = bnxt_cp_rings_in_use(bp);
+       int nq = bnxt_nq_rings_in_use(bp);
        int rx = bp->rx_nr_rings;
        int vnic = 1, grp = rx;
 
@@ -5387,7 +5400,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
                rx <<= 1;
        if (BNXT_NEW_RM(bp) &&
            (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
-            hw_resc->resv_vnics != vnic ||
+            hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
             (hw_resc->resv_hw_ring_grps != grp &&
              !(bp->flags & BNXT_FLAG_CHIP_P5))))
                return true;
@@ -5397,7 +5410,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
 static int __bnxt_reserve_rings(struct bnxt *bp)
 {
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-       int cp = bnxt_cp_rings_in_use(bp);
+       int cp = bnxt_nq_rings_in_use(bp);
        int tx = bp->tx_nr_rings;
        int rx = bp->rx_nr_rings;
        int grp, rx_rings, rc;
@@ -5422,7 +5435,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
        tx = hw_resc->resv_tx_rings;
        if (BNXT_NEW_RM(bp)) {
                rx = hw_resc->resv_rx_rings;
-               cp = hw_resc->resv_cp_rings;
+               cp = hw_resc->resv_irqs;
                grp = hw_resc->resv_hw_ring_grps;
                vnic = hw_resc->resv_vnics;
        }
@@ -6292,6 +6305,8 @@ hwrm_func_qcaps_exit:
        return rc;
 }
 
+static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
+
 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
        int rc;
@@ -6299,6 +6314,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
        rc = __bnxt_hwrm_func_qcaps(bp);
        if (rc)
                return rc;
+       rc = bnxt_hwrm_queue_qportcfg(bp);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
+               return rc;
+       }
        if (bp->hwrm_spec_code >= 0x10803) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
@@ -7026,7 +7046,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
 
 unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
 {
-       return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
+       unsigned int cp = bp->hw_resc.max_cp_rings;
+
+       if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+               cp -= bnxt_get_ulp_msix_num(bp);
+
+       return cp;
 }
 
 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
@@ -7048,7 +7073,9 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
        int total_req = bp->cp_nr_rings + num;
        int max_idx, avail_msix;
 
-       max_idx = min_t(int, bp->total_irqs, max_cp);
+       max_idx = bp->total_irqs;
+       if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+               max_idx = min_t(int, bp->total_irqs, max_cp);
        avail_msix = max_idx - bp->cp_nr_rings;
        if (!BNXT_NEW_RM(bp) || avail_msix >= num)
                return avail_msix;
@@ -7066,7 +7093,7 @@ static int bnxt_get_num_msix(struct bnxt *bp)
        if (!BNXT_NEW_RM(bp))
                return bnxt_get_max_func_irqs(bp);
 
-       return bnxt_cp_rings_in_use(bp);
+       return bnxt_nq_rings_in_use(bp);
 }
 
 static int bnxt_init_msix(struct bnxt *bp)
@@ -7794,6 +7821,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 
                rc = bnxt_hwrm_func_resc_qcaps(bp, true);
                hw_resc->resv_cp_rings = 0;
+               hw_resc->resv_irqs = 0;
                hw_resc->resv_tx_rings = 0;
                hw_resc->resv_rx_rings = 0;
                hw_resc->resv_hw_ring_grps = 0;
@@ -9799,13 +9827,16 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
                                int *max_cp)
 {
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-       int max_ring_grps = 0;
+       int max_ring_grps = 0, max_irq;
 
        *max_tx = hw_resc->max_tx_rings;
        *max_rx = hw_resc->max_rx_rings;
-       *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
-                       hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
-       *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
+       *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
+       max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
+                       bnxt_get_ulp_msix_num(bp),
+                       bnxt_get_max_func_stat_ctxs(bp));
+       if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+               *max_cp = min_t(int, *max_cp, max_irq);
        max_ring_grps = hw_resc->max_hw_ring_grps;
        if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
                *max_cp -= 1;
@@ -9813,6 +9844,11 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
        }
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                *max_rx >>= 1;
+       if (bp->flags & BNXT_FLAG_CHIP_P5) {
+               bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
+               /* On P5 chips, max_cp output param should be available NQs */
+               *max_cp = max_irq;
+       }
        *max_rx = min_t(int, *max_rx, max_ring_grps);
 }
 
index 9e99d4ab3e062fe14fee7cd78b8626032fa5c705..3030931ccaf8afc25b4d42d4a8390a7692bcbbcd 100644 (file)
@@ -928,6 +928,7 @@ struct bnxt_hw_resc {
        u16     min_stat_ctxs;
        u16     max_stat_ctxs;
        u16     max_irqs;
+       u16     resv_irqs;
 };
 
 #if defined(CONFIG_BNXT_SRIOV)
index b59b382d34f94277a9a33e52223fda8ab7f19af3..0a3097baafde6f31b47ec6b1a3fb9982ed7d6329 100644 (file)
@@ -168,7 +168,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
        if (BNXT_NEW_RM(bp)) {
                struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 
-               avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
+               avail_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
                edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
        }
        bnxt_fill_msix_vecs(bp, ent);
index 4c3925af53bccf0215f9fabd037adf8ab993ea46..abe5d0dac8510b984692b70c6e4f5307c105335e 100644 (file)
@@ -111,7 +111,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
        "mac_tx_one_collision",
        "mac_tx_multi_collision",
        "mac_tx_max_collision_fail",
-       "mac_tx_max_deferal_fail",
+       "mac_tx_max_deferral_fail",
        "mac_tx_fifo_err",
        "mac_tx_runts",
 
index ea9859e028d48f55328eb211bdbef27e7f2b8f76..de61060721c4a67a1f55e47bb29ddf4f38b0dce3 100644 (file)
@@ -349,13 +349,15 @@ lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
        struct sk_buff *skb = sc->ctxptr;
        struct net_device *ndev = skb->dev;
+       u32 iq_no;
 
        dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
                         sc->datasize, DMA_TO_DEVICE);
        dev_kfree_skb_any(skb);
+       iq_no = sc->iq_no;
        octeon_free_soft_command(oct, sc);
 
-       if (octnet_iq_is_full(oct, sc->iq_no))
+       if (octnet_iq_is_full(oct, iq_no))
                return;
 
        if (netif_queue_stopped(ndev))
index c415ac67cb7bef218d476fc59f7302b83660513b..e80fedb27cee81411019914f483590c7ec6c1871 100644 (file)
@@ -2786,7 +2786,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
        if (!muram_node) {
                dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
                        __func__);
-               goto fman_node_put;
+               goto fman_free;
        }
 
        err = of_address_to_resource(muram_node, 0,
@@ -2795,11 +2795,10 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
                of_node_put(muram_node);
                dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
                        __func__, err);
-               goto fman_node_put;
+               goto fman_free;
        }
 
        of_node_put(muram_node);
-       of_node_put(fm_node);
 
        err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED,
                               "fman", fman);
index e2f80cca9bed432e88221d89d54fe337e09e16d0..0d2de6f676764d73729e859ecfcceaf0a1f47ac9 100644 (file)
@@ -231,7 +231,7 @@ struct emac_regs {
 #define EMAC_STACR_PHYE                        0x00004000
 #define EMAC_STACR_STAC_MASK           0x00003000
 #define EMAC_STACR_STAC_READ           0x00001000
-#define EMAC_STACR_STAC_WRITE          0x00000800
+#define EMAC_STACR_STAC_WRITE          0x00002000
 #define EMAC_STACR_OPBC_MASK           0x00000C00
 #define EMAC_STACR_OPBC_50             0x00000000
 #define EMAC_STACR_OPBC_66             0x00000400
index c0203a0d5e3b8be87f33a7eb7a1f11b1ecd8ca18..ed50b8dee44f3a8699ca0a226d81f482cdd0f3c0 100644 (file)
@@ -1859,7 +1859,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
        if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
            adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
-               netdev_notify_peers(netdev);
+               call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
 
        netif_carrier_on(netdev);
 
index 7a37a37e3fb347d90679db2ca16231597ba9afad..125ea99418df6915da4c5ad9427802e0aeb738f9 100644 (file)
@@ -4375,8 +4375,27 @@ static void mvpp2_phylink_validate(struct net_device *dev,
                                   unsigned long *supported,
                                   struct phylink_link_state *state)
 {
+       struct mvpp2_port *port = netdev_priv(dev);
        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 
+       /* Invalid combinations */
+       switch (state->interface) {
+       case PHY_INTERFACE_MODE_10GKR:
+       case PHY_INTERFACE_MODE_XAUI:
+               if (port->gop_id != 0)
+                       goto empty_set;
+               break;
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               if (port->gop_id == 0)
+                       goto empty_set;
+               break;
+       default:
+               break;
+       }
+
        phylink_set(mask, Autoneg);
        phylink_set_port_modes(mask);
        phylink_set(mask, Pause);
@@ -4384,6 +4403,8 @@ static void mvpp2_phylink_validate(struct net_device *dev,
 
        switch (state->interface) {
        case PHY_INTERFACE_MODE_10GKR:
+       case PHY_INTERFACE_MODE_XAUI:
+       case PHY_INTERFACE_MODE_NA:
                phylink_set(mask, 10000baseCR_Full);
                phylink_set(mask, 10000baseSR_Full);
                phylink_set(mask, 10000baseLR_Full);
@@ -4391,7 +4412,11 @@ static void mvpp2_phylink_validate(struct net_device *dev,
                phylink_set(mask, 10000baseER_Full);
                phylink_set(mask, 10000baseKR_Full);
                /* Fall-through */
-       default:
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+       case PHY_INTERFACE_MODE_SGMII:
                phylink_set(mask, 10baseT_Half);
                phylink_set(mask, 10baseT_Full);
                phylink_set(mask, 100baseT_Half);
@@ -4403,11 +4428,18 @@ static void mvpp2_phylink_validate(struct net_device *dev,
                phylink_set(mask, 1000baseT_Full);
                phylink_set(mask, 1000baseX_Full);
                phylink_set(mask, 2500baseX_Full);
+               break;
+       default:
+               goto empty_set;
        }
 
        bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
        bitmap_and(state->advertising, state->advertising, mask,
                   __ETHTOOL_LINK_MODE_MASK_NBITS);
+       return;
+
+empty_set:
+       bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
 
 static void mvpp22_xlg_link_state(struct mvpp2_port *port,
index 36054e6fb9d34840cd15f9c45296c9258df9a276..f200b8c420d5738e5bc5c67b124c71cc27f21fd6 100644 (file)
@@ -5,7 +5,7 @@
 config MLX4_EN
        tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
        depends on MAY_USE_DEVLINK
-       depends on PCI
+       depends on PCI && NETDEVICES && ETHERNET && INET
        select MLX4_CORE
        imply PTP_1588_CLOCK
        ---help---
index f11b45001cad8c5635684e820a03f183e12d6ef5..d290f0787dfbb22e444bc5892639e4717f11647a 100644 (file)
@@ -1084,8 +1084,8 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
 
        tx_pause = !!(pause->tx_pause);
        rx_pause = !!(pause->rx_pause);
-       rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
-       tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
+       rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp;
+       tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp;
 
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
                                    priv->rx_skb_size + ETH_FCS_LEN,
index b744cd49a7856e97917bcdce93e7d5ee205f09cd..6b88881b8e3585422f2548df3267175bc9d6b16f 100644 (file)
@@ -3493,8 +3493,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
        }
 
-       /* MTU range: 46 - hw-specific max */
-       dev->min_mtu = MLX4_EN_MIN_MTU;
+       /* MTU range: 68 - hw-specific max */
+       dev->min_mtu = ETH_MIN_MTU;
        dev->max_mtu = priv->max_mtu;
 
        mdev->pndev[port] = dev;
index 485d856546c6c3b83cacdcbe6376d73862dd0f8a..8137454e253497e37b638a88432daf01184c95cb 100644 (file)
 #define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
                                  ETH_HLEN + PREAMBLE_LEN)
 
-#define MLX4_EN_MIN_MTU                46
 /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
  * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
  */
index 16985ca3248d72c4001f5997f0ead50ac10e6d0a..624eed345b5d2b19fa5ed54935667b41090383f8 100644 (file)
@@ -724,9 +724,9 @@ static u32 mlx5e_get_fcs(const struct sk_buff *skb)
        return __get_unaligned_cpu32(fcs_bytes);
 }
 
-static u8 get_ip_proto(struct sk_buff *skb, __be16 proto)
+static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 {
-       void *ip_p = skb->data + sizeof(struct ethhdr);
+       void *ip_p = skb->data + network_depth;
 
        return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
                                            ((struct ipv6hdr *)ip_p)->nexthdr;
@@ -755,7 +755,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                goto csum_unnecessary;
 
        if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
-               if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP))
+               if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
                        goto csum_unnecessary;
 
                skb->ip_summed = CHECKSUM_COMPLETE;
index ad06d9969bc13eb465a82b9138bc0ea342ab15e9..5c13674439f1f0751a369a3112d19bea46a2464c 100644 (file)
@@ -560,7 +560,7 @@ static void mlxsw_sp_nve_mc_list_ip_del(struct mlxsw_sp *mlxsw_sp,
 
        mc_record = mlxsw_sp_nve_mc_record_find(mc_list, proto, addr,
                                                &mc_entry);
-       if (WARN_ON(!mc_record))
+       if (!mc_record)
                return;
 
        mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
@@ -647,7 +647,7 @@ void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
 
        key.fid_index = mlxsw_sp_fid_index(fid);
        mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
-       if (WARN_ON(!mc_list))
+       if (!mc_list)
                return;
 
        mlxsw_sp_nve_fid_flood_index_clear(fid, mc_list);
index 9e9bb57134f2c868c63c69adc239b396244d444b..6ebf99cc315443e48b9b850957c3a77d0ca559db 100644 (file)
@@ -1275,15 +1275,12 @@ mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
 {
        u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
        enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
-       struct net_device *ipip_ul_dev;
 
        if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
                return false;
 
-       ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
        return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
-                                                ul_tb_id, ipip_entry) &&
-              (!ipip_ul_dev || ipip_ul_dev == ul_dev);
+                                                ul_tb_id, ipip_entry);
 }
 
 /* Given decap parameters, find the corresponding IPIP entry. */
index 739a51f0a366f421074a5ac31a6ec887898fb76a..50080c60a279436ad52eb95658ff01fc4587242b 100644 (file)
@@ -296,7 +296,13 @@ static bool
 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
                                    bridge_port)
 {
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
+       struct net_device *dev = bridge_port->dev;
+       struct mlxsw_sp *mlxsw_sp;
+
+       if (is_vlan_dev(dev))
+               mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
+       else
+               mlxsw_sp = mlxsw_sp_lower_get(dev);
 
        /* In case ports were pulled from out of a bridged LAG, then
         * it's possible the reference count isn't zero, yet the bridge
@@ -2109,7 +2115,7 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
 
        vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
-       if (WARN_ON(!mlxsw_sp_port_vlan))
+       if (!mlxsw_sp_port_vlan)
                return;
 
        mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
@@ -2134,8 +2140,10 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
        if (!fid)
                return -EINVAL;
 
-       if (mlxsw_sp_fid_vni_is_set(fid))
-               return -EINVAL;
+       if (mlxsw_sp_fid_vni_is_set(fid)) {
+               err = -EINVAL;
+               goto err_vni_exists;
+       }
 
        err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
        if (err)
@@ -2149,6 +2157,7 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
        return 0;
 
 err_nve_fid_enable:
+err_vni_exists:
        mlxsw_sp_fid_put(fid);
        return err;
 }
index 29c95423ab64604a9ef9629b3b5b42469994280d..2f49eb75f3cce3245b7162a2ee3cf664bc0ab7f1 100644 (file)
@@ -476,16 +476,16 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
        if (err)
                goto err_destroy_flow;
 
-       err = nfp_flower_xmit_flow(netdev, flow_pay,
-                                  NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
-       if (err)
-               goto err_destroy_flow;
-
        flow_pay->tc_flower_cookie = flow->cookie;
        err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
                                     nfp_flower_table_params);
        if (err)
-               goto err_destroy_flow;
+               goto err_release_metadata;
+
+       err = nfp_flower_xmit_flow(netdev, flow_pay,
+                                  NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+       if (err)
+               goto err_remove_rhash;
 
        port->tc_offload_cnt++;
 
@@ -494,6 +494,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 
        return 0;
 
+err_remove_rhash:
+       WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+                                           &flow_pay->fl_node,
+                                           nfp_flower_table_params));
+err_release_metadata:
+       nfp_modify_flow_metadata(app, flow_pay);
 err_destroy_flow:
        kfree(flow_pay->action_data);
        kfree(flow_pay->mask_data);
index 81045dfa1cd898726da2e2e37520be720e0e8842..44f6e4873aadd16b35ae34c3561393fc010a698b 100644 (file)
@@ -571,6 +571,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
        struct cp_private *cp;
        int handled = 0;
        u16 status;
+       u16 mask;
 
        if (unlikely(dev == NULL))
                return IRQ_NONE;
@@ -578,6 +579,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 
        spin_lock(&cp->lock);
 
+       mask = cpr16(IntrMask);
+       if (!mask)
+               goto out_unlock;
+
        status = cpr16(IntrStatus);
        if (!status || (status == 0xFFFF))
                goto out_unlock;
index 6732f5cbde081052ce9e1c2417451118cadbff8f..7c7cd9d94bcc18ddb1d5685fc59f608cec765be8 100644 (file)
                                 NETIF_MSG_TX_ERR)
 
 /* Parameter for descriptor */
-#define AVE_NR_TXDESC          32      /* Tx descriptor */
-#define AVE_NR_RXDESC          64      /* Rx descriptor */
+#define AVE_NR_TXDESC          64      /* Tx descriptor */
+#define AVE_NR_RXDESC          256     /* Rx descriptor */
 
 #define AVE_DESC_OFS_CMDSTS    0
 #define AVE_DESC_OFS_ADDRL     4
 
 /* Parameter for ethernet frame */
 #define AVE_MAX_ETHFRAME       1518
+#define AVE_FRAME_HEADROOM     2
 
 /* Parameter for interrupt */
 #define AVE_INTM_COUNT         20
@@ -576,12 +577,13 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
 
        skb = priv->rx.desc[entry].skbs;
        if (!skb) {
-               skb = netdev_alloc_skb_ip_align(ndev,
-                                               AVE_MAX_ETHFRAME);
+               skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME);
                if (!skb) {
                        netdev_err(ndev, "can't allocate skb for Rx\n");
                        return -ENOMEM;
                }
+               skb->data += AVE_FRAME_HEADROOM;
+               skb->tail += AVE_FRAME_HEADROOM;
        }
 
        /* set disable to cmdsts */
@@ -594,12 +596,12 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
         * - Rx buffer begins with 2 byte headroom, and data will be put from
         *   (buffer + 2).
         * To satisfy this, specify the address to put back the buffer
-        * pointer advanced by NET_IP_ALIGN by netdev_alloc_skb_ip_align(),
-        * and expand the map size by NET_IP_ALIGN.
+        * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size
+        * by AVE_FRAME_HEADROOM.
         */
        ret = ave_dma_map(ndev, &priv->rx.desc[entry],
-                         skb->data - NET_IP_ALIGN,
-                         AVE_MAX_ETHFRAME + NET_IP_ALIGN,
+                         skb->data - AVE_FRAME_HEADROOM,
+                         AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM,
                          DMA_FROM_DEVICE, &paddr);
        if (ret) {
                netdev_err(ndev, "can't map skb for Rx\n");
@@ -1689,9 +1691,10 @@ static int ave_probe(struct platform_device *pdev)
                 pdev->name, pdev->id);
 
        /* Register as a NAPI supported driver */
-       netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc);
+       netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx,
+                      NAPI_POLL_WEIGHT);
        netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
-                         priv->tx.ndesc);
+                         NAPI_POLL_WEIGHT);
 
        platform_set_drvdata(pdev, ndev);
 
@@ -1913,5 +1916,6 @@ static struct platform_driver ave_driver = {
 };
 module_platform_driver(ave_driver);
 
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
 MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
 MODULE_LICENSE("GPL v2");
index 076a8be18d6754c489ecaf031fb65db79f58c29b..5551fead8f6646f327dcfc7d5b98d8f3483f3eeb 100644 (file)
@@ -2550,12 +2550,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
                        netdev_warn(priv->dev, "PTP init failed\n");
        }
 
-#ifdef CONFIG_DEBUG_FS
-       ret = stmmac_init_fs(dev);
-       if (ret < 0)
-               netdev_warn(priv->dev, "%s: failed debugFS registration\n",
-                           __func__);
-#endif
        priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
        if (priv->use_riwt) {
@@ -2756,10 +2750,6 @@ static int stmmac_release(struct net_device *dev)
 
        netif_carrier_off(dev);
 
-#ifdef CONFIG_DEBUG_FS
-       stmmac_exit_fs(dev);
-#endif
-
        stmmac_release_ptp(priv);
 
        return 0;
@@ -3899,6 +3889,9 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
        u32 tx_count = priv->plat->tx_queues_to_use;
        u32 queue;
 
+       if ((dev->flags & IFF_UP) == 0)
+               return 0;
+
        for (queue = 0; queue < rx_count; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 
@@ -4397,6 +4390,13 @@ int stmmac_dvr_probe(struct device *device,
                goto error_netdev_register;
        }
 
+#ifdef CONFIG_DEBUG_FS
+       ret = stmmac_init_fs(ndev);
+       if (ret < 0)
+               netdev_warn(priv->dev, "%s: failed debugFS registration\n",
+                           __func__);
+#endif
+
        return ret;
 
 error_netdev_register:
@@ -4432,6 +4432,9 @@ int stmmac_dvr_remove(struct device *dev)
 
        netdev_info(priv->dev, "%s: removing driver", __func__);
 
+#ifdef CONFIG_DEBUG_FS
+       stmmac_exit_fs(ndev);
+#endif
        stmmac_stop_all_dma(priv);
 
        stmmac_mac_set(priv, priv->ioaddr, false);
index fc8d5f1ee1addeebd4b3748032a1632b1f41df06..0da3d36b283becf838bed9357c0520e0714529e5 100644 (file)
@@ -608,7 +608,7 @@ static int macvlan_open(struct net_device *dev)
                goto hash_add;
        }
 
-       err = -EBUSY;
+       err = -EADDRINUSE;
        if (macvlan_addr_busy(vlan->port, dev->dev_addr))
                goto out;
 
@@ -706,7 +706,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
        } else {
                /* Rehash and update the device filters */
                if (macvlan_addr_busy(vlan->port, addr))
-                       return -EBUSY;
+                       return -EADDRINUSE;
 
                if (!macvlan_passthru(port)) {
                        err = dev_uc_add(lowerdev, addr);
@@ -747,6 +747,9 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
                return dev_set_mac_address(vlan->lowerdev, addr);
        }
 
+       if (macvlan_addr_busy(vlan->port, addr->sa_data))
+               return -EADDRINUSE;
+
        return macvlan_sync_address(dev, addr->sa_data);
 }
 
index 23ee3967c166713bac24906f1705a53c8eed8d70..18e92c19c5ab8716f6a87e905689a2b0c16b56d4 100644 (file)
@@ -1880,20 +1880,17 @@ EXPORT_SYMBOL(genphy_loopback);
 
 static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
 {
-       phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
-                              PHY_10BT_FEATURES);
-
        switch (max_speed) {
-       default:
-               return -ENOTSUPP;
-       case SPEED_1000:
-               phydev->supported |= PHY_1000BT_FEATURES;
+       case SPEED_10:
+               phydev->supported &= ~PHY_100BT_FEATURES;
                /* fall through */
        case SPEED_100:
-               phydev->supported |= PHY_100BT_FEATURES;
-               /* fall through */
-       case SPEED_10:
-               phydev->supported |= PHY_10BT_FEATURES;
+               phydev->supported &= ~PHY_1000BT_FEATURES;
+               break;
+       case SPEED_1000:
+               break;
+       default:
+               return -ENOTSUPP;
        }
 
        return 0;
index 83060fb349f4d5d458e762eb540afe0de2b935d6..ad9db652874dc737d0a76f16b9af4c56749d60fc 100644 (file)
@@ -162,7 +162,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
        /* 1000Base-PX or 1000Base-BX10 */
        if ((id->base.e_base_px || id->base.e_base_bx10) &&
            br_min <= 1300 && br_max >= 1200)
-               phylink_set(support, 1000baseX_Full);
+               phylink_set(modes, 1000baseX_Full);
 
        /* For active or passive cables, select the link modes
         * based on the bit rates and the cable compliance bytes.
index e244f5d7512a6e8f8a2a5b5eb141a2024f80a078..005020042be946a23609dca3dfdcb589d60eff01 100644 (file)
@@ -2293,9 +2293,9 @@ static void tun_setup(struct net_device *dev)
 static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
                        struct netlink_ext_ack *extack)
 {
-       if (!data)
-               return 0;
-       return -EINVAL;
+       NL_SET_ERR_MSG(extack,
+                      "tun/tap creation via rtnetlink is not supported.");
+       return -EOPNOTSUPP;
 }
 
 static size_t tun_get_size(const struct net_device *dev)
@@ -2385,6 +2385,7 @@ static int tun_xdp_one(struct tun_struct *tun,
                       struct tun_file *tfile,
                       struct xdp_buff *xdp, int *flush)
 {
+       unsigned int datasize = xdp->data_end - xdp->data;
        struct tun_xdp_hdr *hdr = xdp->data_hard_start;
        struct virtio_net_hdr *gso = &hdr->gso;
        struct tun_pcpu_stats *stats;
@@ -2461,7 +2462,7 @@ build:
        stats = get_cpu_ptr(tun->pcpu_stats);
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
-       stats->rx_bytes += skb->len;
+       stats->rx_bytes += datasize;
        u64_stats_update_end(&stats->syncp);
        put_cpu_ptr(stats);
 
index cecfd77c9f3ca6e4ef1b454a7c049d5f4ec11e56..ea672145f6a66b97ec3572f213939a64323fcdf3 100644 (file)
@@ -365,7 +365,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   struct page *page, unsigned int offset,
-                                  unsigned int len, unsigned int truesize)
+                                  unsigned int len, unsigned int truesize,
+                                  bool hdr_valid)
 {
        struct sk_buff *skb;
        struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -387,7 +388,8 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
        else
                hdr_padded_len = sizeof(struct padded_vnet_hdr);
 
-       memcpy(hdr, p, hdr_len);
+       if (hdr_valid)
+               memcpy(hdr, p, hdr_len);
 
        len -= hdr_len;
        offset += hdr_padded_len;
@@ -739,7 +741,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
                                   struct virtnet_rq_stats *stats)
 {
        struct page *page = buf;
-       struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
+       struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
+                                         PAGE_SIZE, true);
 
        stats->bytes += len - vi->hdr_len;
        if (unlikely(!skb))
@@ -842,7 +845,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                rcu_read_unlock();
                                put_page(page);
                                head_skb = page_to_skb(vi, rq, xdp_page,
-                                                      offset, len, PAGE_SIZE);
+                                                      offset, len,
+                                                      PAGE_SIZE, false);
                                return head_skb;
                        }
                        break;
@@ -898,7 +902,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                goto err_skb;
        }
 
-       head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
+       head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
        curr_skb = head_skb;
 
        if (unlikely(!curr_skb))
index aa8058264d5b5bd6d6d112eeaaf999acde15d263..d1464e3e1be21a23f0cbc2d82bd4360bced5b4ee 100644 (file)
@@ -2884,6 +2884,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 
        wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
 
+       tasklet_hrtimer_init(&data->beacon_timer,
+                            mac80211_hwsim_beacon,
+                            CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
        err = ieee80211_register_hw(hw);
        if (err < 0) {
                pr_debug("mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
@@ -2908,10 +2912,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                                    data->debugfs,
                                    data, &hwsim_simulate_radar);
 
-       tasklet_hrtimer_init(&data->beacon_timer,
-                            mac80211_hwsim_beacon,
-                            CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-
        spin_lock_bh(&hwsim_radio_lock);
        err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht,
                                     hwsim_rht_params);
@@ -3703,16 +3703,16 @@ static int __init init_mac80211_hwsim(void)
        if (err)
                goto out_unregister_pernet;
 
+       err = hwsim_init_netlink();
+       if (err)
+               goto out_unregister_driver;
+
        hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
        if (IS_ERR(hwsim_class)) {
                err = PTR_ERR(hwsim_class);
-               goto out_unregister_driver;
+               goto out_exit_netlink;
        }
 
-       err = hwsim_init_netlink();
-       if (err < 0)
-               goto out_unregister_driver;
-
        for (i = 0; i < radios; i++) {
                struct hwsim_new_radio_params param = { 0 };
 
@@ -3818,6 +3818,8 @@ out_free_mon:
        free_netdev(hwsim_mon);
 out_free_radios:
        mac80211_hwsim_free();
+out_exit_netlink:
+       hwsim_exit_netlink();
 out_unregister_driver:
        platform_driver_unregister(&mac80211_hwsim_driver);
 out_unregister_pernet:
index 182258f64417b6d8165653ecffdbb77ac853a3b8..d0c621b32f72314adcd7a382fbddf78d462dfebf 100644 (file)
@@ -111,6 +111,8 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap);
 resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
 resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
+int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
+               resource_size_t size);
 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id);
 int alias_dpa_busy(struct device *dev, void *data);
index 24c64090169e6a76e5200806bb2078c9969fc1ba..6f22272e8d8014d9ccf3a7e480ea42c2c3b8b2ba 100644 (file)
@@ -649,14 +649,47 @@ static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
                        ALIGN_DOWN(phys, nd_pfn->align));
 }
 
+/*
+ * Check if pmem collides with 'System RAM', or other regions when
+ * section aligned.  Trim it accordingly.
+ */
+static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
+{
+       struct nd_namespace_common *ndns = nd_pfn->ndns;
+       struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+       struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
+       const resource_size_t start = nsio->res.start;
+       const resource_size_t end = start + resource_size(&nsio->res);
+       resource_size_t adjust, size;
+
+       *start_pad = 0;
+       *end_trunc = 0;
+
+       adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
+       size = resource_size(&nsio->res) + adjust;
+       if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
+                               IORES_DESC_NONE) == REGION_MIXED
+                       || nd_region_conflict(nd_region, start - adjust, size))
+               *start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+
+       /* Now check that end of the range does not collide. */
+       adjust = PHYS_SECTION_ALIGN_UP(end) - end;
+       size = resource_size(&nsio->res) + adjust;
+       if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+                               IORES_DESC_NONE) == REGION_MIXED
+                       || !IS_ALIGNED(end, nd_pfn->align)
+                       || nd_region_conflict(nd_region, start, size + adjust))
+               *end_trunc = end - phys_pmem_align_down(nd_pfn, end);
+}
+
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
        u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
        struct nd_namespace_common *ndns = nd_pfn->ndns;
-       u32 start_pad = 0, end_trunc = 0;
+       struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        resource_size_t start, size;
-       struct nd_namespace_io *nsio;
        struct nd_region *nd_region;
+       u32 start_pad, end_trunc;
        struct nd_pfn_sb *pfn_sb;
        unsigned long npfns;
        phys_addr_t offset;
@@ -688,30 +721,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 
        memset(pfn_sb, 0, sizeof(*pfn_sb));
 
-       /*
-        * Check if pmem collides with 'System RAM' when section aligned and
-        * trim it accordingly
-        */
-       nsio = to_nd_namespace_io(&ndns->dev);
-       start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
-       size = resource_size(&nsio->res);
-       if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-                               IORES_DESC_NONE) == REGION_MIXED) {
-               start = nsio->res.start;
-               start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
-       }
-
-       start = nsio->res.start;
-       size = PHYS_SECTION_ALIGN_UP(start + size) - start;
-       if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-                               IORES_DESC_NONE) == REGION_MIXED
-                       || !IS_ALIGNED(start + resource_size(&nsio->res),
-                               nd_pfn->align)) {
-               size = resource_size(&nsio->res);
-               end_trunc = start + size - phys_pmem_align_down(nd_pfn,
-                               start + size);
-       }
-
+       trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
        if (start_pad + end_trunc)
                dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
                                dev_name(&ndns->dev), start_pad + end_trunc);
@@ -722,7 +732,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
         * implementation will limit the pfns advertised through
         * ->direct_access() to those that are included in the memmap.
         */
-       start += start_pad;
+       start = nsio->res.start + start_pad;
        size = resource_size(&nsio->res);
        npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
                        / PAGE_SIZE);
index 174a418cb171545db6c27b0896d023aea75bdee0..e7377f1028ef687637a4a9f481899b05cc264b1f 100644 (file)
@@ -1184,6 +1184,47 @@ int nvdimm_has_cache(struct nd_region *nd_region)
 }
 EXPORT_SYMBOL_GPL(nvdimm_has_cache);
 
+struct conflict_context {
+       struct nd_region *nd_region;
+       resource_size_t start, size;
+};
+
+static int region_conflict(struct device *dev, void *data)
+{
+       struct nd_region *nd_region;
+       struct conflict_context *ctx = data;
+       resource_size_t res_end, region_end, region_start;
+
+       if (!is_memory(dev))
+               return 0;
+
+       nd_region = to_nd_region(dev);
+       if (nd_region == ctx->nd_region)
+               return 0;
+
+       res_end = ctx->start + ctx->size;
+       region_start = nd_region->ndr_start;
+       region_end = region_start + nd_region->ndr_size;
+       if (ctx->start >= region_start && ctx->start < region_end)
+               return -EBUSY;
+       if (res_end > region_start && res_end <= region_end)
+               return -EBUSY;
+       return 0;
+}
+
+int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
+               resource_size_t size)
+{
+       struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+       struct conflict_context ctx = {
+               .nd_region = nd_region,
+               .start = start,
+               .size = size,
+       };
+
+       return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
+}
+
 void __exit nd_region_devs_exit(void)
 {
        ida_destroy(&region_ida);
index f905768627368dc2b6c913cde8622b0b80e3d9ac..1310753a01e519e6de46e285cc99213e0ce26717 100644 (file)
@@ -846,6 +846,8 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 {
        struct nvme_ctrl *ctrl = rq->end_io_data;
+       unsigned long flags;
+       bool startka = false;
 
        blk_mq_free_request(rq);
 
@@ -857,7 +859,13 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
        }
 
        ctrl->comp_seen = false;
-       schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+       spin_lock_irqsave(&ctrl->lock, flags);
+       if (ctrl->state == NVME_CTRL_LIVE ||
+           ctrl->state == NVME_CTRL_CONNECTING)
+               startka = true;
+       spin_unlock_irqrestore(&ctrl->lock, flags);
+       if (startka)
+               schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
index 932242e27b03ea173295f2922ae3b2efcccdea2a..fb84caddd94b8406905a75a35c8d27deeb3f8c34 100644 (file)
@@ -529,6 +529,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
        struct nvmet_rdma_rsp *rsp =
                container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+       struct nvmet_rdma_queue *queue = cq->cq_context;
 
        nvmet_rdma_release_rsp(rsp);
 
@@ -536,7 +537,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
                     wc->status != IB_WC_WR_FLUSH_ERR)) {
                pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
                        wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
-               nvmet_rdma_error_comp(rsp->queue);
+               nvmet_rdma_error_comp(queue);
        }
 }
 
index dcb29cb76dc69d1a958f4ae25732048b7532e549..f78860ce884bc531860bba73bce96b83f1c444ae 100644 (file)
@@ -895,7 +895,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
        struct pcie_link_state *link;
        int blacklist = !!pcie_aspm_sanity_check(pdev);
 
-       if (!aspm_support_enabled || aspm_disabled)
+       if (!aspm_support_enabled)
                return;
 
        if (pdev->link_state)
index 97b6f197f0079b949e4947df00efaed4dbfd76fe..c9c57b4a0b71850a75cebbcf1a664293d7d5156a 100644 (file)
@@ -56,6 +56,7 @@ struct virtio_ccw_device {
        unsigned int revision; /* Transport revision */
        wait_queue_head_t wait_q;
        spinlock_t lock;
+       struct mutex io_lock; /* Serializes I/O requests */
        struct list_head virtqueues;
        unsigned long indicators;
        unsigned long indicators2;
@@ -296,6 +297,7 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
        unsigned long flags;
        int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
 
+       mutex_lock(&vcdev->io_lock);
        do {
                spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
                ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
@@ -308,7 +310,9 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
                cpu_relax();
        } while (ret == -EBUSY);
        wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
-       return ret ? ret : vcdev->err;
+       ret = ret ? ret : vcdev->err;
+       mutex_unlock(&vcdev->io_lock);
+       return ret;
 }
 
 static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
@@ -828,6 +832,7 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
        int ret;
        struct ccw1 *ccw;
        void *config_area;
+       unsigned long flags;
 
        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
@@ -846,11 +851,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
        if (ret)
                goto out_free;
 
+       spin_lock_irqsave(&vcdev->lock, flags);
        memcpy(vcdev->config, config_area, offset + len);
-       if (buf)
-               memcpy(buf, &vcdev->config[offset], len);
        if (vcdev->config_ready < offset + len)
                vcdev->config_ready = offset + len;
+       spin_unlock_irqrestore(&vcdev->lock, flags);
+       if (buf)
+               memcpy(buf, config_area + offset, len);
 
 out_free:
        kfree(config_area);
@@ -864,6 +871,7 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;
        void *config_area;
+       unsigned long flags;
 
        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
@@ -876,9 +884,11 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
        /* Make sure we don't overwrite fields. */
        if (vcdev->config_ready < offset)
                virtio_ccw_get_config(vdev, 0, NULL, offset);
+       spin_lock_irqsave(&vcdev->lock, flags);
        memcpy(&vcdev->config[offset], buf, len);
        /* Write the config area to the host. */
        memcpy(config_area, vcdev->config, sizeof(vcdev->config));
+       spin_unlock_irqrestore(&vcdev->lock, flags);
        ccw->cmd_code = CCW_CMD_WRITE_CONF;
        ccw->flags = 0;
        ccw->count = offset + len;
@@ -1247,6 +1257,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
        init_waitqueue_head(&vcdev->wait_q);
        INIT_LIST_HEAD(&vcdev->virtqueues);
        spin_lock_init(&vcdev->lock);
+       mutex_init(&vcdev->io_lock);
 
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        dev_set_drvdata(&cdev->dev, vcdev);
index 5c8ed7350a04a4f65be2eefa103637f2943e5a37..a36e4cf1841d9da7fd22cb5f6491d134b9b7f96f 100644 (file)
@@ -220,6 +220,7 @@ static int d7s_probe(struct platform_device *op)
        dev_set_drvdata(&op->dev, p);
        d7s_device = p;
        err = 0;
+       of_node_put(opts);
 
 out:
        return err;
index 56e962a014939e31c7ad74687263ecd69558ef7b..b8481927bfe4048b4147e01c77c0f7f695ab90bb 100644 (file)
@@ -910,8 +910,10 @@ static void envctrl_init_i2c_child(struct device_node *dp,
                        for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) {
                                pchild->mon_type[len] = ENVCTRL_NOMON;
                        }
+                       of_node_put(root_node);
                        return;
                }
+               of_node_put(root_node);
        }
 
        /* Get the monitor channels. */
index 93c66ebad907ee0e9b99505e54aed32853ab4aae..f78d2e5c1471d3faf22d8e5a9b98e566f1531189 100644 (file)
@@ -2416,8 +2416,8 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
 failed:
                ISCSI_DBG_EH(session,
                             "failing session reset: Could not log back into "
-                            "%s, %s [age %d]\n", session->targetname,
-                            conn->persistent_address, session->age);
+                            "%s [age %d]\n", session->targetname,
+                            session->age);
                spin_unlock_bh(&session->frwd_lock);
                mutex_unlock(&session->eh_mutex);
                return FAILED;
index 20fa6785a0e2e882b6e63ae4914625f24ec7679b..68d62d55a3a50429eb97febc179726300618f151 100644 (file)
@@ -167,7 +167,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
                       sizeof(phba->wwpn));
        }
 
-       phba->sli3_options = 0x0;
+       /*
+        * Clear all option bits except LPFC_SLI3_BG_ENABLED,
+        * which was already set in lpfc_get_cfgparam()
+        */
+       phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
 
        /* Setup and issue mailbox READ REV command */
        lpfc_read_rev(phba, pmb);
index 783a1540cfbea6e918fb08ee71a13642e03164f8..b9e5cd79931a21293a58e18125b2e2f650d47a73 100644 (file)
@@ -4965,7 +4965,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
                phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
                                        LPFC_SLI3_HBQ_ENABLED |
                                        LPFC_SLI3_CRP_ENABLED |
-                                       LPFC_SLI3_BG_ENABLED |
                                        LPFC_SLI3_DSS_ENABLED);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
index f03dc03a42c35c31eb91ec2af88f18ddf98069de..8f88348ebe4245b8206c1b68115360e10d61c66a 100644 (file)
@@ -446,7 +446,6 @@ struct storvsc_device {
 
        bool     destroy;
        bool     drain_notify;
-       bool     open_sub_channel;
        atomic_t num_outstanding_req;
        struct Scsi_Host *host;
 
@@ -636,33 +635,38 @@ get_in_err:
 static void handle_sc_creation(struct vmbus_channel *new_sc)
 {
        struct hv_device *device = new_sc->primary_channel->device_obj;
+       struct device *dev = &device->device;
        struct storvsc_device *stor_device;
        struct vmstorage_channel_properties props;
+       int ret;
 
        stor_device = get_out_stor_device(device);
        if (!stor_device)
                return;
 
-       if (stor_device->open_sub_channel == false)
-               return;
-
        memset(&props, 0, sizeof(struct vmstorage_channel_properties));
 
-       vmbus_open(new_sc,
-                  storvsc_ringbuffer_size,
-                  storvsc_ringbuffer_size,
-                  (void *)&props,
-                  sizeof(struct vmstorage_channel_properties),
-                  storvsc_on_channel_callback, new_sc);
+       ret = vmbus_open(new_sc,
+                        storvsc_ringbuffer_size,
+                        storvsc_ringbuffer_size,
+                        (void *)&props,
+                        sizeof(struct vmstorage_channel_properties),
+                        storvsc_on_channel_callback, new_sc);
 
-       if (new_sc->state == CHANNEL_OPENED_STATE) {
-               stor_device->stor_chns[new_sc->target_cpu] = new_sc;
-               cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
+       /* In case vmbus_open() fails, we don't use the sub-channel. */
+       if (ret != 0) {
+               dev_err(dev, "Failed to open sub-channel: err=%d\n", ret);
+               return;
        }
+
+       /* Add the sub-channel to the array of available channels. */
+       stor_device->stor_chns[new_sc->target_cpu] = new_sc;
+       cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
 }
 
 static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
 {
+       struct device *dev = &device->device;
        struct storvsc_device *stor_device;
        int num_cpus = num_online_cpus();
        int num_sc;
@@ -679,21 +683,11 @@ static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
        request = &stor_device->init_request;
        vstor_packet = &request->vstor_packet;
 
-       stor_device->open_sub_channel = true;
        /*
         * Establish a handler for dealing with subchannels.
         */
        vmbus_set_sc_create_callback(device->channel, handle_sc_creation);
 
-       /*
-        * Check to see if sub-channels have already been created. This
-        * can happen when this driver is re-loaded after unloading.
-        */
-
-       if (vmbus_are_subchannels_present(device->channel))
-               return;
-
-       stor_device->open_sub_channel = false;
        /*
         * Request the host to create sub-channels.
         */
@@ -710,23 +704,29 @@ static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 
-       if (ret != 0)
+       if (ret != 0) {
+               dev_err(dev, "Failed to create sub-channel: err=%d\n", ret);
                return;
+       }
 
        t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
-       if (t == 0)
+       if (t == 0) {
+               dev_err(dev, "Failed to create sub-channel: timed out\n");
                return;
+       }
 
        if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-           vstor_packet->status != 0)
+           vstor_packet->status != 0) {
+               dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n",
+                       vstor_packet->operation, vstor_packet->status);
                return;
+       }
 
        /*
-        * Now that we created the sub-channels, invoke the check; this
-        * may trigger the callback.
+        * We need to do nothing here, because vmbus_process_offer()
+        * invokes channel->sc_creation_callback, which will open and use
+        * the sub-channel(s).
         */
-       stor_device->open_sub_channel = true;
-       vmbus_are_subchannels_present(device->channel);
 }
 
 static void cache_wwn(struct storvsc_device *stor_device,
@@ -1794,7 +1794,6 @@ static int storvsc_probe(struct hv_device *device,
        }
 
        stor_device->destroy = false;
-       stor_device->open_sub_channel = false;
        init_waitqueue_head(&stor_device->waiting_to_drain);
        stor_device->device = device;
        stor_device->host = host;
index 6e491023fdd88b2860b92699896569edbcfe4da6..0d6b2a88fc8e26d9297801304c0d2f1a65fbbb43 100644 (file)
@@ -1202,8 +1202,6 @@ static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
 
 static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
 {
-       pvscsi_shutdown_intr(adapter);
-
        if (adapter->workqueue)
                destroy_workqueue(adapter->workqueue);
 
@@ -1534,6 +1532,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 out_reset_adapter:
        ll_adapter_reset(adapter);
 out_release_resources:
+       pvscsi_shutdown_intr(adapter);
        pvscsi_release_resources(adapter);
        scsi_host_put(host);
 out_disable_device:
@@ -1542,6 +1541,7 @@ out_disable_device:
        return error;
 
 out_release_resources_and_disable:
+       pvscsi_shutdown_intr(adapter);
        pvscsi_release_resources(adapter);
        goto out_disable_device;
 }
index ec277ece47afd87dbb61d00f66568e138657d3d1..a951b3fd1ea1dee94c7e572affcbce22dd3ee7ed 100644 (file)
@@ -5,3 +5,8 @@ Before this stateless decoder driver can leave the staging area:
 * Userspace support for the Request API needs to be reviewed;
 * Another stateless decoder driver should be submitted;
 * At least one stateless encoder driver should be submitted.
+* When queueing a request containing references to I frames, the
+  refcount of the memory for those I frames needs to be incremented
+  and decremented when the request is completed. This will likely
+  require some help from vb2. The driver should fail the request
+  if the memory/buffer is gone.
index 9d156efbc9edfe9933143464a0c689fb762635c7..4d473f008aa48f7c3f5d2735402a5edc257dbf61 100644 (file)
@@ -146,7 +146,7 @@ void r8712_report_sec_ie(struct _adapter *adapter, u8 authmode, u8 *sec_ie)
                p = buff;
                p += sprintf(p, "ASSOCINFO(ReqIEs=");
                len = sec_ie[1] + 2;
-               len =  (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX - 1;
+               len =  (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
                for (i = 0; i < len; i++)
                        p += sprintf(p, "%02x", sec_ie[i]);
                p += sprintf(p, ")");
index a7374006a9fbe1eef65649377bd07f6e9aef3354..986a1d52691804fb5173b780347f907fc7a5ed3c 100644 (file)
@@ -1346,7 +1346,7 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
                     u8 *out_ie, uint in_len)
 {
        u8 authmode = 0, match;
-       u8 sec_ie[255], uncst_oui[4], bkup_ie[255];
+       u8 sec_ie[IW_CUSTOM_MAX], uncst_oui[4], bkup_ie[255];
        u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
        uint ielength, cnt, remove_cnt;
        int iEntry;
index 69c7abc0e3a551af5fd0328a0f6926010a32986d..8445d516c93d352cb0f83ab809154109faee5cde 100644 (file)
@@ -1565,7 +1565,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
        if (pstat->aid > 0) {
                DBG_871X("  old AID %d\n", pstat->aid);
        } else {
-               for (pstat->aid = 1; pstat->aid < NUM_STA; pstat->aid++)
+               for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
                        if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
                                break;
 
index 92f67d40f2e96a876083a71b6287e2bf159243c6..d7105d01859ab086de87482535019408ae15c818 100644 (file)
@@ -357,7 +357,7 @@ static int armada_get_temp_legacy(struct thermal_zone_device *thermal,
        int ret;
 
        /* Valid check */
-       if (armada_is_valid(priv)) {
+       if (!armada_is_valid(priv)) {
                dev_err(priv->dev,
                        "Temperature sensor reading not valid\n");
                return -EIO;
@@ -395,7 +395,7 @@ unlock_mutex:
        return ret;
 }
 
-static struct thermal_zone_of_device_ops of_ops = {
+static const struct thermal_zone_of_device_ops of_ops = {
        .get_temp = armada_get_temp,
 };
 
@@ -526,23 +526,21 @@ static int armada_thermal_probe_legacy(struct platform_device *pdev,
 
        /* First memory region points towards the status register */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -EIO;
-
-       /*
-        * Edit the resource start address and length to map over all the
-        * registers, instead of pointing at them one by one.
-        */
-       res->start -= data->syscon_status_off;
-       res->end = res->start + max(data->syscon_status_off,
-                                   max(data->syscon_control0_off,
-                                       data->syscon_control1_off)) +
-                  sizeof(unsigned int) - 1;
-
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
 
+       /*
+        * Fix up from the old individual DT register specification to
+        * cover all the registers.  We do this by adjusting the ioremap()
+        * result, which should be fine as ioremap() deals with pages.
+        * However, validate that we do not cross a page boundary while
+        * making this adjustment.
+        */
+       if (((unsigned long)base & ~PAGE_MASK) < data->syscon_status_off)
+               return -EINVAL;
+       base -= data->syscon_status_off;
+
        priv->syscon = devm_regmap_init_mmio(&pdev->dev, base,
                                             &armada_thermal_regmap_config);
        if (IS_ERR(priv->syscon))
index 23ad4f9f21438e45a819da46962025eeeb922590..b9d90f0ed504dc20357da58104eaf63526f73e06 100644 (file)
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Driver for Broadcom BCM2835 SoC temperature sensor
  *
  * Copyright (C) 2016 Martin Sperl
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/clk.h>
index 1919f91fa756597ff63f6e1518d92363f402e769..e8b1570cc3888a3eff4e066c324a3d3c4d10b95c 100644 (file)
@@ -299,7 +299,7 @@ static int brcmstb_set_trips(void *data, int low, int high)
        return 0;
 }
 
-static struct thermal_zone_of_device_ops of_ops = {
+static const struct thermal_zone_of_device_ops of_ops = {
        .get_temp       = brcmstb_get_temp,
        .set_trips      = brcmstb_set_trips,
 };
index dd5e1cede2b5847979965aa217c29028c36f89cd..c3f933d10295eba70bc8cff4a501333109c1abeb 100644 (file)
@@ -213,17 +213,17 @@ static int mtk8250_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, data);
 
-       pm_runtime_enable(&pdev->dev);
-       if (!pm_runtime_enabled(&pdev->dev)) {
-               err = mtk8250_runtime_resume(&pdev->dev);
-               if (err)
-                       return err;
-       }
+       err = mtk8250_runtime_resume(&pdev->dev);
+       if (err)
+               return err;
 
        data->line = serial8250_register_8250_port(&uart);
        if (data->line < 0)
                return data->line;
 
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
        return 0;
 }
 
@@ -234,13 +234,11 @@ static int mtk8250_remove(struct platform_device *pdev)
        pm_runtime_get_sync(&pdev->dev);
 
        serial8250_unregister_port(data->line);
+       mtk8250_runtime_suspend(&pdev->dev);
 
        pm_runtime_disable(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
 
-       if (!pm_runtime_status_suspended(&pdev->dev))
-               mtk8250_runtime_suspend(&pdev->dev);
-
        return 0;
 }
 
index baeeeaec3f030f94d7c0176647dff851e6647232..6fb312e7af713ecd3efcc4c0ef069602635f7681 100644 (file)
@@ -233,7 +233,7 @@ static void kgdboc_put_char(u8 chr)
 static int param_set_kgdboc_var(const char *kmessage,
                                const struct kernel_param *kp)
 {
-       int len = strlen(kmessage);
+       size_t len = strlen(kmessage);
 
        if (len >= MAX_CONFIG_LEN) {
                pr_err("config string too long\n");
@@ -254,7 +254,7 @@ static int param_set_kgdboc_var(const char *kmessage,
 
        strcpy(config, kmessage);
        /* Chop out \n char as a result of echo */
-       if (config[len - 1] == '\n')
+       if (len && config[len - 1] == '\n')
                config[len - 1] = '\0';
 
        if (configured == 1)
index 70a4ea4eaa6e72b1191c27d66111c4ddb11d46f5..990376576970ae3607a5b645407050a4dfab82af 100644 (file)
@@ -112,6 +112,7 @@ void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
                mode = of_get_property(dp, mode_prop, NULL);
                if (!mode)
                        mode = "9600,8,n,1,-";
+               of_node_put(dp);
        }
 
        cflag = CREAD | HUPCL | CLOCAL;
index ee80dfbd5442b034451e396cd66608e41e8e43bb..687250ec8032359f7ccdd0bfbd2b6e8b3bb4e1f5 100644 (file)
@@ -1373,7 +1373,13 @@ err_release_lock:
        return ERR_PTR(retval);
 }
 
-static void tty_free_termios(struct tty_struct *tty)
+/**
+ * tty_save_termios() - save tty termios data in driver table
+ * @tty: tty whose termios data to save
+ *
+ * Locking: Caller guarantees serialisation with tty_init_termios().
+ */
+void tty_save_termios(struct tty_struct *tty)
 {
        struct ktermios *tp;
        int idx = tty->index;
@@ -1392,6 +1398,7 @@ static void tty_free_termios(struct tty_struct *tty)
        }
        *tp = tty->termios;
 }
+EXPORT_SYMBOL_GPL(tty_save_termios);
 
 /**
  *     tty_flush_works         -       flush all works of a tty/pty pair
@@ -1491,7 +1498,7 @@ static void release_tty(struct tty_struct *tty, int idx)
        WARN_ON(!mutex_is_locked(&tty_mutex));
        if (tty->ops->shutdown)
                tty->ops->shutdown(tty);
-       tty_free_termios(tty);
+       tty_save_termios(tty);
        tty_driver_remove_tty(tty->driver, tty);
        tty->port->itty = NULL;
        if (tty->link)
index cb6075096a5b41b6fbf5e87b6b22239c8a5082e3..044c3cbdcfa40664497d13bd00e607584eff99c7 100644 (file)
@@ -633,7 +633,8 @@ void tty_port_close(struct tty_port *port, struct tty_struct *tty,
        if (tty_port_close_start(port, tty, filp) == 0)
                return;
        tty_port_shutdown(port, tty);
-       set_bit(TTY_IO_ERROR, &tty->flags);
+       if (!port->console)
+               set_bit(TTY_IO_ERROR, &tty->flags);
        tty_port_close_end(port, tty);
        tty_port_tty_set(port, NULL);
 }
index 0f9381b69a3b563bc63d8010a639bb270f0fc2c8..f76b2e0aba9d5f11d994cd06ebe217f283bada16 100644 (file)
@@ -2251,7 +2251,7 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
                /* descriptor may appear anywhere in config */
                err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
                                le16_to_cpu(udev->config[0].desc.wTotalLength),
-                               USB_DT_OTG, (void **) &desc);
+                               USB_DT_OTG, (void **) &desc, sizeof(*desc));
                if (err || !(desc->bmAttributes & USB_OTG_HNP))
                        return 0;
 
@@ -5163,7 +5163,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
 /* Handle notifying userspace about hub over-current events */
 static void port_over_current_notify(struct usb_port *port_dev)
 {
-       static char *envp[] = { NULL, NULL, NULL };
+       char *envp[3];
        struct device *hub_dev;
        char *port_dev_path;
 
@@ -5187,6 +5187,7 @@ static void port_over_current_notify(struct usb_port *port_dev)
        if (!envp[1])
                goto exit;
 
+       envp[2] = NULL;
        kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp);
 
        kfree(envp[1]);
index 0690fcff0ea23437e9ae4478c604d38c9cd870bf..514c5214ddb246be61bd9c30bcfe8a5123e41f34 100644 (file)
@@ -333,6 +333,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Midiman M-Audio Keystation 88es */
        { USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* SanDisk Ultra Fit and Ultra Flair */
+       { USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
+       { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
+
        /* M-Systems Flash Disk Pioneers */
        { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
 
index 79d8bd7a612e65b5c765f16e0c5567a440cdc1da..4ebfbd737905169d1b96904952576defbd01e8f1 100644 (file)
@@ -832,14 +832,14 @@ EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
  */
 
 int __usb_get_extra_descriptor(char *buffer, unsigned size,
-                              unsigned char type, void **ptr)
+                              unsigned char type, void **ptr, size_t minsize)
 {
        struct usb_descriptor_header *header;
 
        while (size >= sizeof(struct usb_descriptor_header)) {
                header = (struct usb_descriptor_header *)buffer;
 
-               if (header->bLength < 2) {
+               if (header->bLength < 2 || header->bLength > size) {
                        printk(KERN_ERR
                                "%s: bogus descriptor, type %d length %d\n",
                                usbcore_name,
@@ -848,7 +848,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
                        return -1;
                }
 
-               if (header->bDescriptorType == type) {
+               if (header->bDescriptorType == type && header->bLength >= minsize) {
                        *ptr = header;
                        return 0;
                }
index 684d6f074c3a490a291109ff9603ec4b44f84e58..09a8ebd955888d6d375d7cee3d0fc53fd1cf0d9c 100644 (file)
@@ -640,7 +640,7 @@ static int hwahc_security_create(struct hwahc *hwahc)
        top = itr + itr_size;
        result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
                        le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
-                       USB_DT_SECURITY, (void **) &secd);
+                       USB_DT_SECURITY, (void **) &secd, sizeof(*secd));
        if (result == -1) {
                dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
                return 0;
index a9515265db4dba4392cc1a834876a5546d8490b2..a9ec7051f286414cff61cfe492b6de17a4f74462 100644 (file)
@@ -139,6 +139,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                 pdev->device == 0x43bb))
                xhci->quirks |= XHCI_SUSPEND_DELAY;
 
+       if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+           (pdev->device == 0x15e0 || pdev->device == 0x15e1))
+               xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
+
        if (pdev->vendor == PCI_VENDOR_ID_AMD)
                xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 
index c928dbbff8811206fd3f668564b3ffe627cdd2aa..dae3be1b9c8f01078a0f6ab259b6f4640ed5534c 100644 (file)
@@ -968,6 +968,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
        unsigned int            delay = XHCI_MAX_HALT_USEC;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        u32                     command;
+       u32                     res;
 
        if (!hcd->state)
                return 0;
@@ -1021,11 +1022,28 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
        command = readl(&xhci->op_regs->command);
        command |= CMD_CSS;
        writel(command, &xhci->op_regs->command);
+       xhci->broken_suspend = 0;
        if (xhci_handshake(&xhci->op_regs->status,
                                STS_SAVE, 0, 10 * 1000)) {
-               xhci_warn(xhci, "WARN: xHC save state timeout\n");
-               spin_unlock_irq(&xhci->lock);
-               return -ETIMEDOUT;
+       /*
+        * AMD SNPS xHC 3.0 occasionally does not clear the
+        * SSS bit of USBSTS and when driver tries to poll
+        * to see if the xHC clears BIT(8) which never happens
+        * and driver assumes that controller is not responding
+        * and times out. To workaround this, its good to check
+        * if SRE and HCE bits are not set (as per xhci
+        * Section 5.4.2) and bypass the timeout.
+        */
+               res = readl(&xhci->op_regs->status);
+               if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
+                   (((res & STS_SRE) == 0) &&
+                               ((res & STS_HCE) == 0))) {
+                       xhci->broken_suspend = 1;
+               } else {
+                       xhci_warn(xhci, "WARN: xHC save state timeout\n");
+                       spin_unlock_irq(&xhci->lock);
+                       return -ETIMEDOUT;
+               }
        }
        spin_unlock_irq(&xhci->lock);
 
@@ -1078,7 +1096,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
        set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
 
        spin_lock_irq(&xhci->lock);
-       if (xhci->quirks & XHCI_RESET_ON_RESUME)
+       if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
                hibernated = true;
 
        if (!hibernated) {
@@ -4496,6 +4514,14 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
 {
        unsigned long long timeout_ns;
 
+       /* Prevent U1 if service interval is shorter than U1 exit latency */
+       if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
+               if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+                       dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
+                       return USB3_LPM_DISABLED;
+               }
+       }
+
        if (xhci->quirks & XHCI_INTEL_HOST)
                timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
        else
@@ -4552,6 +4578,14 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
 {
        unsigned long long timeout_ns;
 
+       /* Prevent U2 if service interval is shorter than U2 exit latency */
+       if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
+               if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+                       dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
+                       return USB3_LPM_DISABLED;
+               }
+       }
+
        if (xhci->quirks & XHCI_INTEL_HOST)
                timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
        else
index 260b259b72bcb6abaf85db2b32c591a64b23dae5..c3515bad5dbbad26efcc71f89fdd64cc28911329 100644 (file)
@@ -1850,6 +1850,7 @@ struct xhci_hcd {
 #define XHCI_ZERO_64B_REGS     BIT_ULL(32)
 #define XHCI_DEFAULT_PM_RUNTIME_ALLOW  BIT_ULL(33)
 #define XHCI_RESET_PLL_ON_DISCONNECT   BIT_ULL(34)
+#define XHCI_SNPS_BROKEN_SUSPEND    BIT_ULL(35)
 
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
@@ -1879,6 +1880,8 @@ struct xhci_hcd {
        void                    *dbc;
        /* platform-specific data -- must come last */
        unsigned long           priv[0] __aligned(sizeof(s64));
+       /* Broken Suspend flag for SNPS Suspend resume issue */
+       u8                      broken_suspend;
 };
 
 /* Platform specific overrides to generic XHCI hc_driver ops */
index 85b48c6ddc7e681e7aadb8ddb86d53dbffba85a9..39ca31b4de4667a49bc8016d79dd95be05f970f1 100644 (file)
@@ -51,6 +51,7 @@ static const struct usb_device_id appledisplay_table[] = {
        { APPLEDISPLAY_DEVICE(0x921c) },
        { APPLEDISPLAY_DEVICE(0x921d) },
        { APPLEDISPLAY_DEVICE(0x9222) },
+       { APPLEDISPLAY_DEVICE(0x9226) },
        { APPLEDISPLAY_DEVICE(0x9236) },
 
        /* Terminating entry */
index 17940589c647cfdcbcd81e7e3b0c5d30dec6e8b8..7d289302ff6cfd22578885acbbadb6a048a9f5af 100644 (file)
@@ -101,7 +101,6 @@ static int usb_console_setup(struct console *co, char *options)
                cflag |= PARENB;
                break;
        }
-       co->cflag = cflag;
 
        /*
         * no need to check the index here: if the index is wrong, console
@@ -164,6 +163,7 @@ static int usb_console_setup(struct console *co, char *options)
                        serial->type->set_termios(tty, port, &dummy);
 
                        tty_port_tty_set(&port->port, NULL);
+                       tty_save_termios(tty);
                        tty_kref_put(tty);
                }
                tty_port_set_initialized(&port->port, 1);
index 3a5f81a66d34f0b5d19d09d16f9e07451e7958ff..6b98d8e3a5bf8247784303ce890a990fb8ec1259 100644 (file)
@@ -944,10 +944,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
                if (msg->iova <= vq_msg->iova &&
                    msg->iova + msg->size - 1 >= vq_msg->iova &&
                    vq_msg->type == VHOST_IOTLB_MISS) {
-                       mutex_lock(&node->vq->mutex);
                        vhost_poll_queue(&node->vq->poll);
-                       mutex_unlock(&node->vq->mutex);
-
                        list_del(&node->node);
                        kfree(node);
                }
index 34bc3ab40c6da8d5637e6e5bd19e6fb27ec27fba..98ed5be132c6a59a798346f182259b398cdfc11c 100644 (file)
@@ -15,6 +15,7 @@
 #include <net/sock.h>
 #include <linux/virtio_vsock.h>
 #include <linux/vhost.h>
+#include <linux/hashtable.h>
 
 #include <net/af_vsock.h>
 #include "vhost.h"
@@ -27,14 +28,14 @@ enum {
 
 /* Used to track all the vhost_vsock instances on the system. */
 static DEFINE_SPINLOCK(vhost_vsock_lock);
-static LIST_HEAD(vhost_vsock_list);
+static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
 
 struct vhost_vsock {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[2];
 
-       /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
-       struct list_head list;
+       /* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
+       struct hlist_node hash;
 
        struct vhost_work send_pkt_work;
        spinlock_t send_pkt_list_lock;
@@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid(void)
        return VHOST_VSOCK_DEFAULT_HOST_CID;
 }
 
-static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
+/* Callers that dereference the return value must hold vhost_vsock_lock or the
+ * RCU read lock.
+ */
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
 {
        struct vhost_vsock *vsock;
 
-       list_for_each_entry(vsock, &vhost_vsock_list, list) {
+       hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
                u32 other_cid = vsock->guest_cid;
 
                /* Skip instances that have no CID yet */
@@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
        return NULL;
 }
 
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
-{
-       struct vhost_vsock *vsock;
-
-       spin_lock_bh(&vhost_vsock_lock);
-       vsock = __vhost_vsock_get(guest_cid);
-       spin_unlock_bh(&vhost_vsock_lock);
-
-       return vsock;
-}
-
 static void
 vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                            struct vhost_virtqueue *vq)
@@ -210,9 +203,12 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
        struct vhost_vsock *vsock;
        int len = pkt->len;
 
+       rcu_read_lock();
+
        /* Find the vhost_vsock according to guest context id  */
        vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
        if (!vsock) {
+               rcu_read_unlock();
                virtio_transport_free_pkt(pkt);
                return -ENODEV;
        }
@@ -225,6 +221,8 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
        spin_unlock_bh(&vsock->send_pkt_list_lock);
 
        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
+       rcu_read_unlock();
        return len;
 }
 
@@ -234,12 +232,15 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
        struct vhost_vsock *vsock;
        struct virtio_vsock_pkt *pkt, *n;
        int cnt = 0;
+       int ret = -ENODEV;
        LIST_HEAD(freeme);
 
+       rcu_read_lock();
+
        /* Find the vhost_vsock according to guest context id  */
        vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
        if (!vsock)
-               return -ENODEV;
+               goto out;
 
        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
@@ -265,7 +266,10 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
                        vhost_poll_queue(&tx_vq->poll);
        }
 
-       return 0;
+       ret = 0;
+out:
+       rcu_read_unlock();
+       return ret;
 }
 
 static struct virtio_vsock_pkt *
@@ -533,10 +537,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
        spin_lock_init(&vsock->send_pkt_list_lock);
        INIT_LIST_HEAD(&vsock->send_pkt_list);
        vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
-
-       spin_lock_bh(&vhost_vsock_lock);
-       list_add_tail(&vsock->list, &vhost_vsock_list);
-       spin_unlock_bh(&vhost_vsock_lock);
        return 0;
 
 out:
@@ -563,13 +563,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
         * executing.
         */
 
-       if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
-               sock_set_flag(sk, SOCK_DONE);
-               vsk->peer_shutdown = SHUTDOWN_MASK;
-               sk->sk_state = SS_UNCONNECTED;
-               sk->sk_err = ECONNRESET;
-               sk->sk_error_report(sk);
-       }
+       /* If the peer is still valid, no need to reset connection */
+       if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+               return;
+
+       /* If the close timeout is pending, let it expire.  This avoids races
+        * with the timeout callback.
+        */
+       if (vsk->close_work_scheduled)
+               return;
+
+       sock_set_flag(sk, SOCK_DONE);
+       vsk->peer_shutdown = SHUTDOWN_MASK;
+       sk->sk_state = SS_UNCONNECTED;
+       sk->sk_err = ECONNRESET;
+       sk->sk_error_report(sk);
 }
 
 static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
@@ -577,9 +585,13 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
        struct vhost_vsock *vsock = file->private_data;
 
        spin_lock_bh(&vhost_vsock_lock);
-       list_del(&vsock->list);
+       if (vsock->guest_cid)
+               hash_del_rcu(&vsock->hash);
        spin_unlock_bh(&vhost_vsock_lock);
 
+       /* Wait for other CPUs to finish using vsock */
+       synchronize_rcu();
+
        /* Iterating over all connections for all CIDs to find orphans is
         * inefficient.  Room for improvement here. */
        vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
@@ -620,12 +632,17 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
 
        /* Refuse if CID is already in use */
        spin_lock_bh(&vhost_vsock_lock);
-       other = __vhost_vsock_get(guest_cid);
+       other = vhost_vsock_get(guest_cid);
        if (other && other != vsock) {
                spin_unlock_bh(&vhost_vsock_lock);
                return -EADDRINUSE;
        }
+
+       if (vsock->guest_cid)
+               hash_del_rcu(&vsock->hash);
+
        vsock->guest_cid = guest_cid;
+       hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
        spin_unlock_bh(&vhost_vsock_lock);
 
        return 0;
index efcf89a8ba44c3300f1a214f01ed935ce933c188..1a4e2b101ef24a09c63793f6e2d94f84443957cd 100644 (file)
@@ -389,13 +389,11 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
 
        /*
         * Here we don't really care about alignment since extent allocator can
-        * handle it.  We care more about the size, as if one block group is
-        * larger than maximum size, it's must be some obvious corruption.
+        * handle it.  We care more about the size.
         */
-       if (key->offset > BTRFS_MAX_DATA_CHUNK_SIZE || key->offset == 0) {
+       if (key->offset == 0) {
                block_group_err(fs_info, leaf, slot,
-                       "invalid block group size, have %llu expect (0, %llu]",
-                               key->offset, BTRFS_MAX_DATA_CHUNK_SIZE);
+                               "invalid block group size 0");
                return -EUCLEAN;
        }
 
index abcd78e332feb05ad8fa7f540d14917d2835f68b..85dadb93c9926cad276833c82c5045f71e52ca16 100644 (file)
@@ -133,7 +133,7 @@ config CIFS_XATTR
 
 config CIFS_POSIX
         bool "CIFS POSIX Extensions"
-        depends on CIFS_XATTR
+        depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
         help
           Enabling this option will cause the cifs client to attempt to
          negotiate a newer dialect with servers, such as Samba 3.0.5
index 3713d22b95a7011bda1e8701e5aea54fee96ea2a..907e85d65bb4e09b5fdc8f7c1e6c35ef56b519ad 100644 (file)
@@ -174,7 +174,7 @@ cifs_bp_rename_retry:
 
                cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
                memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
-               full_path[dfsplen] = '\\';
+               full_path[dfsplen] = dirsep;
                for (i = 0; i < pplen-1; i++)
                        if (full_path[dfsplen+1+i] == '/')
                                full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
index 74c33d5fafc83ff841a43b15ad222a34acb860d4..c9bc56b1baac2deac379103f1a4a32da45747b9c 100644 (file)
@@ -2541,14 +2541,13 @@ static int
 cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
        struct cifs_aio_ctx *ctx)
 {
-       int wait_retry = 0;
        unsigned int wsize, credits;
        int rc;
        struct TCP_Server_Info *server =
                tlink_tcon(wdata->cfile->tlink)->ses->server;
 
        /*
-        * Try to resend this wdata, waiting for credits up to 3 seconds.
+        * Wait for credits to resend this wdata.
         * Note: we are attempting to resend the whole wdata not in segments
         */
        do {
@@ -2556,19 +2555,13 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
                        server, wdata->bytes, &wsize, &credits);
 
                if (rc)
-                       break;
+                       goto out;
 
                if (wsize < wdata->bytes) {
                        add_credits_and_wake_if(server, credits, 0);
                        msleep(1000);
-                       wait_retry++;
                }
-       } while (wsize < wdata->bytes && wait_retry < 3);
-
-       if (wsize < wdata->bytes) {
-               rc = -EBUSY;
-               goto out;
-       }
+       } while (wsize < wdata->bytes);
 
        rc = -EAGAIN;
        while (rc == -EAGAIN) {
@@ -3234,14 +3227,13 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
                        struct list_head *rdata_list,
                        struct cifs_aio_ctx *ctx)
 {
-       int wait_retry = 0;
        unsigned int rsize, credits;
        int rc;
        struct TCP_Server_Info *server =
                tlink_tcon(rdata->cfile->tlink)->ses->server;
 
        /*
-        * Try to resend this rdata, waiting for credits up to 3 seconds.
+        * Wait for credits to resend this rdata.
         * Note: we are attempting to resend the whole rdata not in segments
         */
        do {
@@ -3249,24 +3241,13 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
                                                &rsize, &credits);
 
                if (rc)
-                       break;
+                       goto out;
 
                if (rsize < rdata->bytes) {
                        add_credits_and_wake_if(server, credits, 0);
                        msleep(1000);
-                       wait_retry++;
                }
-       } while (rsize < rdata->bytes && wait_retry < 3);
-
-       /*
-        * If we can't find enough credits to send this rdata
-        * release the rdata and return failure, this will pass
-        * whatever I/O amount we have finished to VFS.
-        */
-       if (rsize < rdata->bytes) {
-               rc = -EBUSY;
-               goto out;
-       }
+       } while (rsize < rdata->bytes);
 
        rc = -EAGAIN;
        while (rc == -EAGAIN) {
index 9bcce89ea18ef458b25e786ce21b6f1c6122a178..48132eca3761de2b4cdf7c6c75ab8efda8cf7a26 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -232,6 +232,34 @@ static void *get_unlocked_entry(struct xa_state *xas)
        }
 }
 
+/*
+ * The only thing keeping the address space around is the i_pages lock
+ * (it's cycled in clear_inode() after removing the entries from i_pages)
+ * After we call xas_unlock_irq(), we cannot touch xas->xa.
+ */
+static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+{
+       struct wait_exceptional_entry_queue ewait;
+       wait_queue_head_t *wq;
+
+       init_wait(&ewait.wait);
+       ewait.wait.func = wake_exceptional_entry_func;
+
+       wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+       prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+       xas_unlock_irq(xas);
+       schedule();
+       finish_wait(wq, &ewait.wait);
+
+       /*
+        * Entry lock waits are exclusive. Wake up the next waiter since
+        * we aren't sure we will acquire the entry lock and thus wake
+        * the next waiter up on unlock.
+        */
+       if (waitqueue_active(wq))
+               __wake_up(wq, TASK_NORMAL, 1, &ewait.key);
+}
+
 static void put_unlocked_entry(struct xa_state *xas, void *entry)
 {
        /* If we were the only waiter woken, wake the next one */
@@ -351,21 +379,21 @@ static struct page *dax_busy_page(void *entry)
  * @page: The page whose entry we want to lock
  *
  * Context: Process context.
- * Return: %true if the entry was locked or does not need to be locked.
+ * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
+ * not be locked.
  */
-bool dax_lock_mapping_entry(struct page *page)
+dax_entry_t dax_lock_page(struct page *page)
 {
        XA_STATE(xas, NULL, 0);
        void *entry;
-       bool locked;
 
        /* Ensure page->mapping isn't freed while we look at it */
        rcu_read_lock();
        for (;;) {
                struct address_space *mapping = READ_ONCE(page->mapping);
 
-               locked = false;
-               if (!dax_mapping(mapping))
+               entry = NULL;
+               if (!mapping || !dax_mapping(mapping))
                        break;
 
                /*
@@ -375,7 +403,7 @@ bool dax_lock_mapping_entry(struct page *page)
                 * otherwise we would not have a valid pfn_to_page()
                 * translation.
                 */
-               locked = true;
+               entry = (void *)~0UL;
                if (S_ISCHR(mapping->host->i_mode))
                        break;
 
@@ -389,9 +417,7 @@ bool dax_lock_mapping_entry(struct page *page)
                entry = xas_load(&xas);
                if (dax_is_locked(entry)) {
                        rcu_read_unlock();
-                       entry = get_unlocked_entry(&xas);
-                       xas_unlock_irq(&xas);
-                       put_unlocked_entry(&xas, entry);
+                       wait_entry_unlocked(&xas, entry);
                        rcu_read_lock();
                        continue;
                }
@@ -400,23 +426,18 @@ bool dax_lock_mapping_entry(struct page *page)
                break;
        }
        rcu_read_unlock();
-       return locked;
+       return (dax_entry_t)entry;
 }
 
-void dax_unlock_mapping_entry(struct page *page)
+void dax_unlock_page(struct page *page, dax_entry_t cookie)
 {
        struct address_space *mapping = page->mapping;
        XA_STATE(xas, &mapping->i_pages, page->index);
-       void *entry;
 
        if (S_ISCHR(mapping->host->i_mode))
                return;
 
-       rcu_read_lock();
-       entry = xas_load(&xas);
-       rcu_read_unlock();
-       entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
-       dax_unlock_entry(&xas, entry);
+       dax_unlock_entry(&xas, (void *)cookie);
 }
 
 /*
index acc3a5536384cf65c6742d918cc2bd9d42305da3..fc281b738a9822a652f7d19bb60ae15acd7a7ebf 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -62,7 +62,6 @@
 #include <linux/oom.h>
 #include <linux/compat.h>
 #include <linux/vmalloc.h>
-#include <linux/freezer.h>
 
 #include <linux/uaccess.h>
 #include <asm/mmu_context.h>
@@ -1084,7 +1083,7 @@ static int de_thread(struct task_struct *tsk)
        while (sig->notify_count) {
                __set_current_state(TASK_KILLABLE);
                spin_unlock_irq(lock);
-               freezable_schedule();
+               schedule();
                if (unlikely(__fatal_signal_pending(tsk)))
                        goto killed;
                spin_lock_irq(lock);
@@ -1112,7 +1111,7 @@ static int de_thread(struct task_struct *tsk)
                        __set_current_state(TASK_KILLABLE);
                        write_unlock_irq(&tasklist_lock);
                        cgroup_threadgroup_change_end(tsk);
-                       freezable_schedule();
+                       schedule();
                        if (unlikely(__fatal_signal_pending(tsk)))
                                goto killed;
                }
index d094e5688bd3e0e75717b77f3569946fce233a30..9a5bf1e8925b944afef5be2d5b631dbc2c0c163d 100644 (file)
@@ -1884,15 +1884,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                                dio->wait_for_completion = true;
                                ret = 0;
                        }
-
-                       /*
-                        * Splicing to pipes can fail on a full pipe. We have to
-                        * swallow this to make it look like a short IO
-                        * otherwise the higher splice layers will completely
-                        * mishandle the error and stop moving data.
-                        */
-                       if (ret == -EFAULT)
-                               ret = 0;
                        break;
                }
                pos += ret;
index aa12c3063baec60bad7483004379c2edd7800f5e..33824a0a57bfe5de9e31f4d13e4d2eebc3b7b2df 100644 (file)
@@ -98,8 +98,11 @@ struct nfs_direct_req {
        struct pnfs_ds_commit_info ds_cinfo;    /* Storage for cinfo */
        struct work_struct      work;
        int                     flags;
+       /* for write */
 #define NFS_ODIRECT_DO_COMMIT          (1)     /* an unstable reply was received */
 #define NFS_ODIRECT_RESCHED_WRITES     (2)     /* write verification failed */
+       /* for read */
+#define NFS_ODIRECT_SHOULD_DIRTY       (3)     /* dirty user-space page after read */
        struct nfs_writeverf    verf;           /* unstable write verifier */
 };
 
@@ -412,7 +415,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;
 
-               if (!PageCompound(page) && bytes < hdr->good_bytes)
+               if (!PageCompound(page) && bytes < hdr->good_bytes &&
+                   (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
                        set_page_dirty(page);
                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
@@ -587,6 +591,9 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
 
+       if (iter_is_iovec(iter))
+               dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
+
        nfs_start_io_direct(inode);
 
        NFS_I(inode)->read_io += count;
index 74b36ed883caa9e67517230c3b43fa0e7843b73f..310d7500f66528cc65e13f258480b2be600dea5c 100644 (file)
@@ -1733,7 +1733,8 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
        if (fh)
                hdr->args.fh = fh;
 
-       if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+       if (vers == 4 &&
+               !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
                goto out_failed;
 
        /*
@@ -1798,7 +1799,8 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
        if (fh)
                hdr->args.fh = fh;
 
-       if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+       if (vers == 4 &&
+               !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
                goto out_failed;
 
        /*
index 4dae0399c75a7227f8c53a5d2a06d985fa31b3a0..58f30537c47a0a9d04cdba4abbd78188379ae463 100644 (file)
@@ -1956,7 +1956,7 @@ loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
        struct inode *inode_out = file_inode(file_out);
        loff_t ret;
 
-       WARN_ON_ONCE(remap_flags);
+       WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);
 
        if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
                return -EISDIR;
index 3553f1956508daeca04aa1acaa1aaab61cdbb6e5..de2ede048473cef81bec9161cf1842b70835ea63 100644 (file)
@@ -945,11 +945,16 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
        sd->flags &= ~SPLICE_F_NONBLOCK;
        more = sd->flags & SPLICE_F_MORE;
 
+       WARN_ON_ONCE(pipe->nrbufs != 0);
+
        while (len) {
                size_t read_len;
                loff_t pos = sd->pos, prev_pos = pos;
 
-               ret = do_splice_to(in, &pos, pipe, len, flags);
+               /* Don't try to read more the pipe has space for. */
+               read_len = min_t(size_t, len,
+                                (pipe->buffers - pipe->nrbufs) << PAGE_SHIFT);
+               ret = do_splice_to(in, &pos, pipe, read_len, flags);
                if (unlikely(ret <= 0))
                        goto out_release;
 
index 34c6d7bd4d180c736d8da7e6c33a413ee177ac5f..bbdae2b4559fc91d0e7f650fcfe6ed81868b5512 100644 (file)
@@ -330,7 +330,7 @@ xfs_btree_sblock_verify_crc(
 
        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
-                       return __this_address;
+                       return false;
                return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
        }
 
index 404e581f1ea1e879e4a32099d303e53a89fbb91a..1ee8c5539fa4f2e999808acc021d63be0b4963b0 100644 (file)
@@ -1126,9 +1126,9 @@ xfs_free_file_space(
         * page could be mmap'd and iomap_zero_range doesn't do that for us.
         * Writeback of the eof page will do this, albeit clumsily.
         */
-       if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
+       if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
                error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-                               (offset + len) & ~PAGE_MASK, LLONG_MAX);
+                               round_down(offset + len, PAGE_SIZE), LLONG_MAX);
        }
 
        return error;
index 73a1d77ec187c8958d09cc6fb13f4af3e0e61605..3091e4bc04efe1e6f4d9aa88ed7987a221f9bd78 100644 (file)
@@ -40,7 +40,7 @@ xfs_fill_statvfs_from_dquot(
                statp->f_files = limit;
                statp->f_ffree =
                        (statp->f_files > dqp->q_res_icount) ?
-                        (statp->f_ffree - dqp->q_res_icount) : 0;
+                        (statp->f_files - dqp->q_res_icount) : 0;
        }
 }
 
index 450b28db95331ffbe19963c804391d7249cfb2c3..0dd316a74a295132ea6b6c04f914356c5c4064d6 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/radix-tree.h>
 #include <asm/pgtable.h>
 
+typedef unsigned long dax_entry_t;
+
 struct iomap_ops;
 struct dax_device;
 struct dax_operations {
@@ -88,8 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc);
 
 struct page *dax_layout_busy_page(struct address_space *mapping);
-bool dax_lock_mapping_entry(struct page *page);
-void dax_unlock_mapping_entry(struct page *page);
+dax_entry_t dax_lock_page(struct page *page);
+void dax_unlock_page(struct page *page, dax_entry_t cookie);
 #else
 static inline bool bdev_dax_supported(struct block_device *bdev,
                int blocksize)
@@ -122,14 +124,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
        return -EOPNOTSUPP;
 }
 
-static inline bool dax_lock_mapping_entry(struct page *page)
+static inline dax_entry_t dax_lock_page(struct page *page)
 {
        if (IS_DAX(page->mapping->host))
-               return true;
-       return false;
+               return ~0UL;
+       return 0;
 }
 
-static inline void dax_unlock_mapping_entry(struct page *page)
+static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
 {
 }
 #endif
index 448dcc448f1fe59cb14de60969f71f8ba6c35278..795ff0b869bbf6403c0e89b87fdc7855e3bbb5cd 100644 (file)
@@ -449,6 +449,13 @@ struct sock_reuseport;
        offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
 #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)                             \
        offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
+#if BITS_PER_LONG == 64
+# define bpf_ctx_range_ptr(TYPE, MEMBER)                                       \
+       offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
+#else
+# define bpf_ctx_range_ptr(TYPE, MEMBER)                                       \
+       offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
+#endif /* BITS_PER_LONG == 64 */
 
 #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)                           \
        ({                                                                      \
index 76f8db0b0e715c016cc00cb95aa9a269f12c075d..0705164f928c949720d9a0927f1564119146c3fa 100644 (file)
@@ -510,18 +510,22 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
 }
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
                        struct vm_area_struct *vma, unsigned long addr,
-                       int node);
+                       int node, bool hugepage);
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+       alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
 #else
 #define alloc_pages(gfp_mask, order) \
                alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
+       alloc_pages(gfp_mask, order)
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
        alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 #define alloc_page_vma(gfp_mask, vma, addr)                    \
-       alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
+       alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
 #define alloc_page_vma_node(gfp_mask, vma, addr, node)         \
-       alloc_pages_vma(gfp_mask, 0, vma, addr, node)
+       alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
index b3e24368930a2246edd2223c8431a448c7f36776..14131b6fae68dda342187a35312c1eee43f4d5a2 100644 (file)
@@ -905,6 +905,13 @@ struct vmbus_channel {
 
        bool probe_done;
 
+       /*
+        * We must offload the handling of the primary/sub channels
+        * from the single-threaded vmbus_connection.work_queue to
+        * two different workqueue, otherwise we can block
+        * vmbus_connection.work_queue and hang: see vmbus_process_offer().
+        */
+       struct work_struct add_channel_work;
 };
 
 static inline bool is_hvsock_channel(const struct vmbus_channel *c)
index bac395f1d00a0f9691b12ec6841f2401a10ca4fc..5228c62af41659bb7d5ae0e7db00969b9f16ef73 100644 (file)
@@ -139,8 +139,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 struct mempolicy *get_task_policy(struct task_struct *p);
 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
                unsigned long addr);
-struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
-                                               unsigned long addr);
 bool vma_policy_mof(struct vm_area_struct *vma);
 
 extern void numa_default_policy(void);
index d37518e89db2ddea762bb483cc74c13040a0f803..d9d9de3fcf8e297a49f86a0e8103c815102fefbb 100644 (file)
@@ -224,7 +224,7 @@ struct sfp_eeprom_ext {
  *
  * See the SFF-8472 specification and related documents for the definition
  * of these structure members. This can be obtained from
- * ftp://ftp.seagate.com/sff
+ * https://www.snia.org/technology-communities/sff/specifications
  */
 struct sfp_eeprom_id {
        struct sfp_eeprom_base base;
index 43106ffa6788a40101840008d0c702f4c3586c45..2ec1280602390efe6e5c71413a7a731f6e98f398 100644 (file)
@@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
        buf->head[0].iov_base = start;
        buf->head[0].iov_len = len;
        buf->tail[0].iov_len = 0;
-       buf->bvec = NULL;
        buf->pages = NULL;
        buf->page_len = 0;
        buf->flags = 0;
index 414db2bce7150cc94c8b24cc4106161511e4ead1..392138fe59b6929ded24b6dabf317962c5fb3574 100644 (file)
@@ -556,6 +556,7 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
 extern void tty_release_struct(struct tty_struct *tty, int idx);
 extern int tty_release(struct inode *inode, struct file *filp);
 extern void tty_init_termios(struct tty_struct *tty);
+extern void tty_save_termios(struct tty_struct *tty);
 extern int tty_standard_install(struct tty_driver *driver,
                struct tty_struct *tty);
 
index 4cdd515a4385f1a99d95a7bb7b5b29b09a5c1c1a..5e49e82c43684854c379e18a1d698d79ac4ef347 100644 (file)
@@ -407,11 +407,11 @@ struct usb_host_bos {
 };
 
 int __usb_get_extra_descriptor(char *buffer, unsigned size,
-       unsigned char type, void **ptr);
+       unsigned char type, void **ptr, size_t min);
 #define usb_get_extra_descriptor(ifpoint, type, ptr) \
                                __usb_get_extra_descriptor((ifpoint)->extra, \
                                (ifpoint)->extralen, \
-                               type, (void **)ptr)
+                               type, (void **)ptr, sizeof(**(ptr)))
 
 /* ----------------------------------------------------------------------- */
 
index 0ce75c35131f114f24998c6d4332cba5eb20d843..bd36d74316984161bd7e1828b60c189affdf8af3 100644 (file)
@@ -68,7 +68,7 @@ struct media_request {
        unsigned int access_count;
        struct list_head objects;
        unsigned int num_incomplete_objects;
-       struct wait_queue_head poll_wait;
+       wait_queue_head_t poll_wait;
        spinlock_t lock;
 };
 
index f58b384aa6c9e0fd67b05f59bd921ab38bb67923..665990c7dec8c127e2eb8321b80512760f0e824e 100644 (file)
@@ -454,6 +454,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 
 static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
 {
+       unsigned int hh_alen = 0;
        unsigned int seq;
        unsigned int hh_len;
 
@@ -461,16 +462,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
                seq = read_seqbegin(&hh->hh_lock);
                hh_len = hh->hh_len;
                if (likely(hh_len <= HH_DATA_MOD)) {
-                       /* this is inlined by gcc */
-                       memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+                       hh_alen = HH_DATA_MOD;
+
+                       /* skb_push() would proceed silently if we have room for
+                        * the unaligned size but not for the aligned size:
+                        * check headroom explicitly.
+                        */
+                       if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
+                               /* this is inlined by gcc */
+                               memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+                                      HH_DATA_MOD);
+                       }
                } else {
-                       unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
+                       hh_alen = HH_DATA_ALIGN(hh_len);
 
-                       memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+                       if (likely(skb_headroom(skb) >= hh_alen)) {
+                               memcpy(skb->data - hh_alen, hh->hh_data,
+                                      hh_alen);
+                       }
                }
        } while (read_seqretry(&hh->hh_lock, seq));
 
-       skb_push(skb, hh_len);
+       if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
+               kfree_skb(skb);
+               return NET_XMIT_DROP;
+       }
+
+       __skb_push(skb, hh_len);
        return dev_queue_xmit(skb);
 }
 
index ab9242e51d9e04ac6f4a18d50710c03dbc82ff24..2abbc15824af953589d8fb4eb5c15e6d2e4a4c3d 100644 (file)
@@ -620,4 +620,9 @@ static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
        return false;
 }
 
+static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
+{
+       return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
+}
+
 #endif /* __net_sctp_h__ */
index a11f9379047698886713a0f56a950bf93dd367b3..feada358d872f3bc5622f5787078fde4cbfa6a97 100644 (file)
@@ -2075,6 +2075,8 @@ struct sctp_association {
 
        __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
        __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+
+       struct rcu_head rcu;
 };
 
 
index 2dd37cada7c088d7bd6c8b4bd35f18c28c63cc86..888a833d3b003f81f475ef5b695a74739a68c6fe 100644 (file)
@@ -254,11 +254,13 @@ static inline int snd_interval_empty(const struct snd_interval *i)
 static inline int snd_interval_single(const struct snd_interval *i)
 {
        return (i->min == i->max || 
-               (i->min + 1 == i->max && i->openmax));
+               (i->min + 1 == i->max && (i->openmin || i->openmax)));
 }
 
 static inline int snd_interval_value(const struct snd_interval *i)
 {
+       if (i->openmin && !i->openmax)
+               return i->max;
        return i->min;
 }
 
index 538546edbfbd2bd1cfca431aa95864f018fcc7ee..c7f3321fbe4384260da20a6a1fe7cbf48ecfec72 100644 (file)
@@ -760,8 +760,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
 #define __NR_ftruncate __NR3264_ftruncate
 #define __NR_lseek __NR3264_lseek
 #define __NR_sendfile __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
 #define __NR_newfstatat __NR3264_fstatat
 #define __NR_fstat __NR3264_fstat
+#endif
 #define __NR_mmap __NR3264_mmap
 #define __NR_fadvise64 __NR3264_fadvise64
 #ifdef __NR3264_stat
@@ -776,8 +778,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
 #define __NR_ftruncate64 __NR3264_ftruncate
 #define __NR_llseek __NR3264_lseek
 #define __NR_sendfile64 __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
 #define __NR_fstatat64 __NR3264_fstatat
 #define __NR_fstat64 __NR3264_fstat
+#endif
 #define __NR_mmap2 __NR3264_mmap
 #define __NR_fadvise64_64 __NR3264_fadvise64
 #ifdef __NR3264_stat
index 852dc17ab47a07f2580ade5f9e4a1130ee779c26..72c453a8bf50ed5cd4a0383997f5727048ce8d60 100644 (file)
@@ -2170,7 +2170,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for TCP socket matching *tuple*, optionally in a child
  *             network namespace *netns*. The return value must be checked,
@@ -2187,12 +2187,14 @@ union bpf_attr {
  *             **sizeof**\ (*tuple*\ **->ipv6**)
  *                     Look for an IPv6 socket.
  *
- *             If the *netns* is zero, then the socket lookup table in the
- *             netns associated with the *ctx* will be used. For the TC hooks,
- *             this in the netns of the device in the skb. For socket hooks,
- *             this in the netns of the socket. If *netns* is non-zero, then
- *             it specifies the ID of the netns relative to the netns
- *             associated with the *ctx*.
+ *             If the *netns* is a negative signed 32-bit integer, then the
+ *             socket lookup table in the netns associated with the *ctx* will
+ *             will be used. For the TC hooks, this is the netns of the device
+ *             in the skb. For socket hooks, this is the netns of the socket.
+ *             If *netns* is any other signed 32-bit value greater than or
+ *             equal to zero then it specifies the ID of the netns relative to
+ *             the netns associated with the *ctx*. *netns* values beyond the
+ *             range of 32-bit integers are reserved for future use.
  *
  *             All values for *flags* are reserved for future usage, and must
  *             be left at zero.
@@ -2201,8 +2203,10 @@ union bpf_attr {
  *             **CONFIG_NET** configuration option.
  *     Return
  *             Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *             For sockets with reuseport option, the *struct bpf_sock*
+ *             result is from reuse->socks[] using the hash of the tuple.
  *
- * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for UDP socket matching *tuple*, optionally in a child
  *             network namespace *netns*. The return value must be checked,
@@ -2219,12 +2223,14 @@ union bpf_attr {
  *             **sizeof**\ (*tuple*\ **->ipv6**)
  *                     Look for an IPv6 socket.
  *
- *             If the *netns* is zero, then the socket lookup table in the
- *             netns associated with the *ctx* will be used. For the TC hooks,
- *             this in the netns of the device in the skb. For socket hooks,
- *             this in the netns of the socket. If *netns* is non-zero, then
- *             it specifies the ID of the netns relative to the netns
- *             associated with the *ctx*.
+ *             If the *netns* is a negative signed 32-bit integer, then the
+ *             socket lookup table in the netns associated with the *ctx* will
+ *             will be used. For the TC hooks, this is the netns of the device
+ *             in the skb. For socket hooks, this is the netns of the socket.
+ *             If *netns* is any other signed 32-bit value greater than or
+ *             equal to zero then it specifies the ID of the netns relative to
+ *             the netns associated with the *ctx*. *netns* values beyond the
+ *             range of 32-bit integers are reserved for future use.
  *
  *             All values for *flags* are reserved for future usage, and must
  *             be left at zero.
@@ -2233,6 +2239,8 @@ union bpf_attr {
  *             **CONFIG_NET** configuration option.
  *     Return
  *             Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *             For sockets with reuseport option, the *struct bpf_sock*
+ *             result is from reuse->socks[] using the hash of the tuple.
  *
  * int bpf_sk_release(struct bpf_sock *sk)
  *     Description
@@ -2405,6 +2413,9 @@ enum bpf_func_id {
 /* BPF_FUNC_perf_event_output for sk_buff input context. */
 #define BPF_F_CTXLEN_MASK              (0xfffffULL << 32)
 
+/* Current network namespace */
+#define BPF_F_CURRENT_NETNS            (-1L)
+
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
 enum bpf_adj_room_mode {
        BPF_ADJ_ROOM_NET,
@@ -2422,6 +2433,12 @@ enum bpf_lwt_encap_mode {
        BPF_LWT_ENCAP_SEG6_INLINE
 };
 
+#define __bpf_md_ptr(type, name)       \
+union {                                        \
+       type name;                      \
+       __u64 :64;                      \
+} __attribute__((aligned(8)))
+
 /* user accessible mirror of in-kernel sk_buff.
  * new fields can only be added to the end of this structure
  */
@@ -2456,7 +2473,7 @@ struct __sk_buff {
        /* ... here. */
 
        __u32 data_meta;
-       struct bpf_flow_keys *flow_keys;
+       __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
 };
 
 struct bpf_tunnel_key {
@@ -2572,8 +2589,8 @@ enum sk_action {
  * be added to the end of this structure
  */
 struct sk_msg_md {
-       void *data;
-       void *data_end;
+       __bpf_md_ptr(void *, data);
+       __bpf_md_ptr(void *, data_end);
 
        __u32 family;
        __u32 remote_ip4;       /* Stored in network byte order */
@@ -2589,8 +2606,9 @@ struct sk_reuseport_md {
         * Start of directly accessible data. It begins from
         * the tcp/udp header.
         */
-       void *data;
-       void *data_end;         /* End of directly accessible data */
+       __bpf_md_ptr(void *, data);
+       /* End of directly accessible data */
+       __bpf_md_ptr(void *, data_end);
        /*
         * Total length of packet (starting from the tcp/udp header).
         * Note that the directly accessible bytes (data_end - data)
index ee4c82667d659019022f42cfc3c4a13cf9fc9124..4da543d6bea29c315b896848269e873f78ac09dd 100644 (file)
@@ -5,6 +5,7 @@
 #include <uapi/linux/types.h>
 #include <linux/seq_file.h>
 #include <linux/compiler.h>
+#include <linux/ctype.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/anon_inodes.h>
@@ -426,6 +427,30 @@ static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
                offset < btf->hdr.str_len;
 }
 
+/* Only C-style identifier is permitted. This can be relaxed if
+ * necessary.
+ */
+static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
+{
+       /* offset must be valid */
+       const char *src = &btf->strings[offset];
+       const char *src_limit;
+
+       if (!isalpha(*src) && *src != '_')
+               return false;
+
+       /* set a limit on identifier length */
+       src_limit = src + KSYM_NAME_LEN;
+       src++;
+       while (*src && src < src_limit) {
+               if (!isalnum(*src) && *src != '_')
+                       return false;
+               src++;
+       }
+
+       return !*src;
+}
+
 static const char *btf_name_by_offset(const struct btf *btf, u32 offset)
 {
        if (!offset)
@@ -1143,6 +1168,22 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env,
                return -EINVAL;
        }
 
+       /* typedef type must have a valid name, and other ref types,
+        * volatile, const, restrict, should have a null name.
+        */
+       if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
+               if (!t->name_off ||
+                   !btf_name_valid_identifier(env->btf, t->name_off)) {
+                       btf_verifier_log_type(env, t, "Invalid name");
+                       return -EINVAL;
+               }
+       } else {
+               if (t->name_off) {
+                       btf_verifier_log_type(env, t, "Invalid name");
+                       return -EINVAL;
+               }
+       }
+
        btf_verifier_log_type(env, t, NULL);
 
        return 0;
@@ -1300,6 +1341,13 @@ static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
                return -EINVAL;
        }
 
+       /* fwd type must have a valid name */
+       if (!t->name_off ||
+           !btf_name_valid_identifier(env->btf, t->name_off)) {
+               btf_verifier_log_type(env, t, "Invalid name");
+               return -EINVAL;
+       }
+
        btf_verifier_log_type(env, t, NULL);
 
        return 0;
@@ -1356,6 +1404,12 @@ static s32 btf_array_check_meta(struct btf_verifier_env *env,
                return -EINVAL;
        }
 
+       /* array type should not have a name */
+       if (t->name_off) {
+               btf_verifier_log_type(env, t, "Invalid name");
+               return -EINVAL;
+       }
+
        if (btf_type_vlen(t)) {
                btf_verifier_log_type(env, t, "vlen != 0");
                return -EINVAL;
@@ -1532,6 +1586,13 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
                return -EINVAL;
        }
 
+       /* struct type either no name or a valid one */
+       if (t->name_off &&
+           !btf_name_valid_identifier(env->btf, t->name_off)) {
+               btf_verifier_log_type(env, t, "Invalid name");
+               return -EINVAL;
+       }
+
        btf_verifier_log_type(env, t, NULL);
 
        last_offset = 0;
@@ -1543,6 +1604,12 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
                        return -EINVAL;
                }
 
+               /* struct member either no name or a valid one */
+               if (member->name_off &&
+                   !btf_name_valid_identifier(btf, member->name_off)) {
+                       btf_verifier_log_member(env, t, member, "Invalid name");
+                       return -EINVAL;
+               }
                /* A member cannot be in type void */
                if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
                        btf_verifier_log_member(env, t, member,
@@ -1730,6 +1797,13 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
                return -EINVAL;
        }
 
+       /* enum type either no name or a valid one */
+       if (t->name_off &&
+           !btf_name_valid_identifier(env->btf, t->name_off)) {
+               btf_verifier_log_type(env, t, "Invalid name");
+               return -EINVAL;
+       }
+
        btf_verifier_log_type(env, t, NULL);
 
        for (i = 0; i < nr_enums; i++) {
@@ -1739,6 +1813,14 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
                        return -EINVAL;
                }
 
+               /* enum member must have a valid name */
+               if (!enums[i].name_off ||
+                   !btf_name_valid_identifier(btf, enums[i].name_off)) {
+                       btf_verifier_log_type(env, t, "Invalid name");
+                       return -EINVAL;
+               }
+
+
                btf_verifier_log(env, "\t%s val=%d\n",
                                 btf_name_by_offset(btf, enums[i].name_off),
                                 enums[i].val);
index 6dd419550aba4b98a45ee1e5b1cf6fde4bd7c354..fc760d00a38c497502c28b56aad1a8d426565560 100644 (file)
@@ -175,6 +175,7 @@ struct bpf_verifier_stack_elem {
 
 #define BPF_COMPLEXITY_LIMIT_INSNS     131072
 #define BPF_COMPLEXITY_LIMIT_STACK     1024
+#define BPF_COMPLEXITY_LIMIT_STATES    64
 
 #define BPF_MAP_PTR_UNPRIV     1UL
 #define BPF_MAP_PTR_POISON     ((void *)((0xeB9FUL << 1) +     \
@@ -3751,6 +3752,79 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
        }
 }
 
+/* compute branch direction of the expression "if (reg opcode val) goto target;"
+ * and return:
+ *  1 - branch will be taken and "goto target" will be executed
+ *  0 - branch will not be taken and fall-through to next insn
+ * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
+ */
+static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
+{
+       if (__is_pointer_value(false, reg))
+               return -1;
+
+       switch (opcode) {
+       case BPF_JEQ:
+               if (tnum_is_const(reg->var_off))
+                       return !!tnum_equals_const(reg->var_off, val);
+               break;
+       case BPF_JNE:
+               if (tnum_is_const(reg->var_off))
+                       return !tnum_equals_const(reg->var_off, val);
+               break;
+       case BPF_JGT:
+               if (reg->umin_value > val)
+                       return 1;
+               else if (reg->umax_value <= val)
+                       return 0;
+               break;
+       case BPF_JSGT:
+               if (reg->smin_value > (s64)val)
+                       return 1;
+               else if (reg->smax_value < (s64)val)
+                       return 0;
+               break;
+       case BPF_JLT:
+               if (reg->umax_value < val)
+                       return 1;
+               else if (reg->umin_value >= val)
+                       return 0;
+               break;
+       case BPF_JSLT:
+               if (reg->smax_value < (s64)val)
+                       return 1;
+               else if (reg->smin_value >= (s64)val)
+                       return 0;
+               break;
+       case BPF_JGE:
+               if (reg->umin_value >= val)
+                       return 1;
+               else if (reg->umax_value < val)
+                       return 0;
+               break;
+       case BPF_JSGE:
+               if (reg->smin_value >= (s64)val)
+                       return 1;
+               else if (reg->smax_value < (s64)val)
+                       return 0;
+               break;
+       case BPF_JLE:
+               if (reg->umax_value <= val)
+                       return 1;
+               else if (reg->umin_value > val)
+                       return 0;
+               break;
+       case BPF_JSLE:
+               if (reg->smax_value <= (s64)val)
+                       return 1;
+               else if (reg->smin_value > (s64)val)
+                       return 0;
+               break;
+       }
+
+       return -1;
+}
+
 /* Adjusts the register min/max values in the case that the dst_reg is the
  * variable register that we are working on, and src_reg is a constant or we're
  * simply doing a BPF_K check.
@@ -4152,21 +4226,15 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 
        dst_reg = &regs[insn->dst_reg];
 
-       /* detect if R == 0 where R was initialized to zero earlier */
-       if (BPF_SRC(insn->code) == BPF_K &&
-           (opcode == BPF_JEQ || opcode == BPF_JNE) &&
-           dst_reg->type == SCALAR_VALUE &&
-           tnum_is_const(dst_reg->var_off)) {
-               if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
-                   (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
-                       /* if (imm == imm) goto pc+off;
-                        * only follow the goto, ignore fall-through
-                        */
+       if (BPF_SRC(insn->code) == BPF_K) {
+               int pred = is_branch_taken(dst_reg, insn->imm, opcode);
+
+               if (pred == 1) {
+                        /* only follow the goto, ignore fall-through */
                        *insn_idx += insn->off;
                        return 0;
-               } else {
-                       /* if (imm != imm) goto pc+off;
-                        * only follow fall-through branch, since
+               } else if (pred == 0) {
+                       /* only follow fall-through branch, since
                         * that's where the program will go
                         */
                        return 0;
@@ -4980,7 +5048,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
        struct bpf_verifier_state_list *new_sl;
        struct bpf_verifier_state_list *sl;
        struct bpf_verifier_state *cur = env->cur_state, *new;
-       int i, j, err;
+       int i, j, err, states_cnt = 0;
 
        sl = env->explored_states[insn_idx];
        if (!sl)
@@ -5007,8 +5075,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
                        return 1;
                }
                sl = sl->next;
+               states_cnt++;
        }
 
+       if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
+               return 0;
+
        /* there were no equivalent states, remember current one.
         * technically the current state is not proven to be safe yet,
         * but it will either reach outer most bpf_exit (which means it's safe)
@@ -5148,6 +5220,9 @@ static int do_check(struct bpf_verifier_env *env)
                        goto process_bpf_exit;
                }
 
+               if (signal_pending(current))
+                       return -EAGAIN;
+
                if (need_resched())
                        cond_resched();
 
index 322e97bbb4370a8ce4da1af39187dbc775fa83d4..abbd8da9ac21613d6520e1a8533d3fe0b28dccb0 100644 (file)
@@ -572,7 +572,9 @@ static void put_uprobe(struct uprobe *uprobe)
                 * gets called, we don't get a chance to remove uprobe from
                 * delayed_uprobe_list from remove_breakpoint(). Do it here.
                 */
+               mutex_lock(&delayed_uprobe_lock);
                delayed_uprobe_remove(uprobe, NULL);
+               mutex_unlock(&delayed_uprobe_lock);
                kfree(uprobe);
        }
 }
index 08cb57eed3893d191b53d7e8b46a8d5ae4ba2747..b193a59fc05b0aae11a8c3173f5e1aca099ff113 100644 (file)
@@ -104,7 +104,7 @@ asmlinkage void notrace stackleak_erase(void)
 }
 NOKPROBE_SYMBOL(stackleak_erase);
 
-void __used stackleak_track_stack(void)
+void __used notrace stackleak_track_stack(void)
 {
        /*
         * N.B. stackleak_erase() fills the kernel stack with the poison value,
index 622cced74fd90d4a1bb984b6de04282621254557..5da55b38b1b7fd2878a20b41c180b839096fd872 100644 (file)
@@ -629,40 +629,30 @@ release:
  *         available
  * never: never stall for any thp allocation
  */
-static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
+static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
 {
        const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
-       gfp_t this_node = 0;
-
-#ifdef CONFIG_NUMA
-       struct mempolicy *pol;
-       /*
-        * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not
-        * specified, to express a general desire to stay on the current
-        * node for optimistic allocation attempts. If the defrag mode
-        * and/or madvise hint requires the direct reclaim then we prefer
-        * to fallback to other node rather than node reclaim because that
-        * can lead to excessive reclaim even though there is free memory
-        * on other nodes. We expect that NUMA preferences are specified
-        * by memory policies.
-        */
-       pol = get_vma_policy(vma, addr);
-       if (pol->mode != MPOL_BIND)
-               this_node = __GFP_THISNODE;
-       mpol_cond_put(pol);
-#endif
 
+       /* Always do synchronous compaction */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
                return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
+
+       /* Kick kcompactd and fail quickly */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
-               return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node;
+               return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
+
+       /* Synchronous compaction if madvised, otherwise kick kcompactd */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
-               return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
-                                                            __GFP_KSWAPD_RECLAIM | this_node);
+               return GFP_TRANSHUGE_LIGHT |
+                       (vma_madvised ? __GFP_DIRECT_RECLAIM :
+                                       __GFP_KSWAPD_RECLAIM);
+
+       /* Only do synchronous compaction if madvised */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
-               return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
-                                                            this_node);
-       return GFP_TRANSHUGE_LIGHT | this_node;
+               return GFP_TRANSHUGE_LIGHT |
+                      (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
+
+       return GFP_TRANSHUGE_LIGHT;
 }
 
 /* Caller must hold page table lock. */
@@ -734,8 +724,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
                        pte_free(vma->vm_mm, pgtable);
                return ret;
        }
-       gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
-       page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
+       gfp = alloc_hugepage_direct_gfpmask(vma);
+       page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
        if (unlikely(!page)) {
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
@@ -1305,9 +1295,8 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 alloc:
        if (transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow()) {
-               huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
-               new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma,
-                               haddr, numa_node_id());
+               huge_gfp = alloc_hugepage_direct_gfpmask(vma);
+               new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
        } else
                new_page = NULL;
 
index 0cd3de3550f0830f507d286b0499789d7961171e..7c72f2a95785e0d3d5df615ea33477b0bdcc5278 100644 (file)
@@ -1161,6 +1161,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
        LIST_HEAD(tokill);
        int rc = -EBUSY;
        loff_t start;
+       dax_entry_t cookie;
 
        /*
         * Prevent the inode from being freed while we are interrogating
@@ -1169,7 +1170,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
         * also prevents changes to the mapping of this pfn until
         * poison signaling is complete.
         */
-       if (!dax_lock_mapping_entry(page))
+       cookie = dax_lock_page(page);
+       if (!cookie)
                goto out;
 
        if (hwpoison_filter(page)) {
@@ -1220,7 +1222,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
        kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
        rc = 0;
 unlock:
-       dax_unlock_mapping_entry(page);
+       dax_unlock_page(page, cookie);
 out:
        /* drop pgmap ref acquired in caller */
        put_dev_pagemap(pgmap);
index 5837a067124d895f38f6039d9e3739f0a0874fc0..d4496d9d34f533dcd66accb7d92a69a03feae65c 100644 (file)
@@ -1116,8 +1116,8 @@ static struct page *new_page(struct page *page, unsigned long start)
        } else if (PageTransHuge(page)) {
                struct page *thp;
 
-               thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma,
-                               address, numa_node_id());
+               thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
+                                        HPAGE_PMD_ORDER);
                if (!thp)
                        return NULL;
                prep_transhuge_page(thp);
@@ -1662,7 +1662,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
  * freeing by another task.  It is the caller's responsibility to free the
  * extra reference for shared policies.
  */
-struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
                                                unsigned long addr)
 {
        struct mempolicy *pol = __get_vma_policy(vma, addr);
@@ -2011,6 +2011,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  *     @vma:  Pointer to VMA or NULL if not available.
  *     @addr: Virtual Address of the allocation. Must be inside the VMA.
  *     @node: Which node to prefer for allocation (modulo policy).
+ *     @hugepage: for hugepages try only the preferred node if possible
  *
  *     This function allocates a page from the kernel page pool and applies
  *     a NUMA policy associated with the VMA or the current process.
@@ -2021,7 +2022,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  */
 struct page *
 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
-               unsigned long addr, int node)
+               unsigned long addr, int node, bool hugepage)
 {
        struct mempolicy *pol;
        struct page *page;
@@ -2039,6 +2040,31 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
                goto out;
        }
 
+       if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
+               int hpage_node = node;
+
+               /*
+                * For hugepage allocation and non-interleave policy which
+                * allows the current node (or other explicitly preferred
+                * node) we only try to allocate from the current/preferred
+                * node and don't fall back to other nodes, as the cost of
+                * remote accesses would likely offset THP benefits.
+                *
+                * If the policy is interleave, or does not allow the current
+                * node in its nodemask, we allocate the standard way.
+                */
+               if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
+                       hpage_node = pol->v.preferred_node;
+
+               nmask = policy_nodemask(gfp, pol);
+               if (!nmask || node_isset(hpage_node, *nmask)) {
+                       mpol_cond_put(pol);
+                       page = __alloc_pages_node(hpage_node,
+                                               gfp | __GFP_THISNODE, order);
+                       goto out;
+               }
+       }
+
        nmask = policy_nodemask(gfp, pol);
        preferred_nid = policy_node(gfp, pol, node);
        page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
index cddc72ac44d868492e95a4badde4ea748d4aafeb..921f80488bb3fdd03cc7ce64a5a5a6d0f015b794 100644 (file)
@@ -1439,7 +1439,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
 
        shmem_pseudo_vma_init(&pvma, info, hindex);
        page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
-                       HPAGE_PMD_ORDER, &pvma, 0, numa_node_id());
+                       HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
        shmem_pseudo_vma_destroy(&pvma);
        if (page)
                prep_transhuge_page(page);
index c89c22c49015ff070f228bece397937d7cdce8b5..25001913d03b599dde50e85a20de61156465359b 100644 (file)
@@ -28,12 +28,13 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
        return ret;
 }
 
-static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
+static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
+                       u32 *time)
 {
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
        enum bpf_cgroup_storage_type stype;
        u64 time_start, time_spent = 0;
-       u32 ret = 0, i;
+       u32 i;
 
        for_each_cgroup_storage_type(stype) {
                storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
@@ -49,7 +50,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
                repeat = 1;
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
-               ret = bpf_test_run_one(prog, ctx, storage);
+               *ret = bpf_test_run_one(prog, ctx, storage);
                if (need_resched()) {
                        if (signal_pending(current))
                                break;
@@ -65,7 +66,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_free(storage[stype]);
 
-       return ret;
+       return 0;
 }
 
 static int bpf_test_finish(const union bpf_attr *kattr,
@@ -165,7 +166,12 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                __skb_push(skb, hh_len);
        if (is_direct_pkt_access)
                bpf_compute_data_pointers(skb);
-       retval = bpf_test_run(prog, skb, repeat, &duration);
+       ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
+       if (ret) {
+               kfree_skb(skb);
+               kfree(sk);
+               return ret;
+       }
        if (!is_l2) {
                if (skb_headroom(skb) < hh_len) {
                        int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
@@ -212,11 +218,14 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
        rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
        xdp.rxq = &rxqueue->xdp_rxq;
 
-       retval = bpf_test_run(prog, &xdp, repeat, &duration);
+       ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
+       if (ret)
+               goto out;
        if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
            xdp.data_end != xdp.data + size)
                size = xdp.data_end - xdp.data;
        ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
+out:
        kfree(data);
        return ret;
 }
index ddc551f24ba2afc4cfd9f11826602f92945ff871..722d50dbf8a459f412e8c20982600a28edf695a0 100644 (file)
@@ -2175,6 +2175,20 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
        return active;
 }
 
+static void reset_xps_maps(struct net_device *dev,
+                          struct xps_dev_maps *dev_maps,
+                          bool is_rxqs_map)
+{
+       if (is_rxqs_map) {
+               static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
+               RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
+       } else {
+               RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+       }
+       static_key_slow_dec_cpuslocked(&xps_needed);
+       kfree_rcu(dev_maps, rcu);
+}
+
 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
                           struct xps_dev_maps *dev_maps, unsigned int nr_ids,
                           u16 offset, u16 count, bool is_rxqs_map)
@@ -2186,18 +2200,15 @@ static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
             j < nr_ids;)
                active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
                                               count);
-       if (!active) {
-               if (is_rxqs_map) {
-                       RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-               } else {
-                       RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+       if (!active)
+               reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
-                       for (i = offset + (count - 1); count--; i--)
-                               netdev_queue_numa_node_write(
-                                       netdev_get_tx_queue(dev, i),
-                                                       NUMA_NO_NODE);
+       if (!is_rxqs_map) {
+               for (i = offset + (count - 1); count--; i--) {
+                       netdev_queue_numa_node_write(
+                               netdev_get_tx_queue(dev, i),
+                               NUMA_NO_NODE);
                }
-               kfree_rcu(dev_maps, rcu);
        }
 }
 
@@ -2234,10 +2245,6 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
                       false);
 
 out_no_maps:
-       if (static_key_enabled(&xps_rxqs_needed))
-               static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
-
-       static_key_slow_dec_cpuslocked(&xps_needed);
        mutex_unlock(&xps_map_mutex);
        cpus_read_unlock();
 }
@@ -2355,9 +2362,12 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
        if (!new_dev_maps)
                goto out_no_new_maps;
 
-       static_key_slow_inc_cpuslocked(&xps_needed);
-       if (is_rxqs_map)
-               static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+       if (!dev_maps) {
+               /* Increment static keys at most once per type */
+               static_key_slow_inc_cpuslocked(&xps_needed);
+               if (is_rxqs_map)
+                       static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+       }
 
        for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
             j < nr_ids;) {
@@ -2455,13 +2465,8 @@ out_no_new_maps:
        }
 
        /* free map if not active */
-       if (!active) {
-               if (is_rxqs_map)
-                       RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-               else
-                       RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
-               kfree_rcu(dev_maps, rcu);
-       }
+       if (!active)
+               reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
 out_no_maps:
        mutex_unlock(&xps_map_mutex);
@@ -5009,7 +5014,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo
                struct net_device *orig_dev = skb->dev;
                struct packet_type *pt_prev = NULL;
 
-               list_del(&skb->list);
+               skb_list_del_init(skb);
                __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
                if (!pt_prev)
                        continue;
@@ -5165,7 +5170,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
        INIT_LIST_HEAD(&sublist);
        list_for_each_entry_safe(skb, next, head, list) {
                net_timestamp_check(netdev_tstamp_prequeue, skb);
-               list_del(&skb->list);
+               skb_list_del_init(skb);
                if (!skb_defer_rx_timestamp(skb))
                        list_add_tail(&skb->list, &sublist);
        }
@@ -5176,7 +5181,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
                rcu_read_lock();
                list_for_each_entry_safe(skb, next, head, list) {
                        xdp_prog = rcu_dereference(skb->dev->xdp_prog);
-                       list_del(&skb->list);
+                       skb_list_del_init(skb);
                        if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
                                list_add_tail(&skb->list, &sublist);
                }
@@ -5195,7 +5200,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 
                        if (cpu >= 0) {
                                /* Will be handled, remove from list */
-                               list_del(&skb->list);
+                               skb_list_del_init(skb);
                                enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
                        }
                }
@@ -6204,8 +6209,8 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
        napi->skb = NULL;
        napi->poll = poll;
        if (weight > NAPI_POLL_WEIGHT)
-               pr_err_once("netif_napi_add() called with weight %d on device %s\n",
-                           weight, dev->name);
+               netdev_err_once(dev, "%s() called with weight %d\n", __func__,
+                               weight);
        napi->weight = weight;
        list_add(&napi->dev_list, &dev->napi_list);
        napi->dev = dev;
index 9a1327eb25faf2c73a37d253f88b19c6b92414d6..8d2c629501e2df10b0b4113986cac71369863789 100644 (file)
@@ -4890,22 +4890,23 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
        struct net *net;
 
        family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
-       if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
+       if (unlikely(family == AF_UNSPEC || flags ||
+                    !((s32)netns_id < 0 || netns_id <= S32_MAX)))
                goto out;
 
        if (skb->dev)
                caller_net = dev_net(skb->dev);
        else
                caller_net = sock_net(skb->sk);
-       if (netns_id) {
+       if ((s32)netns_id < 0) {
+               net = caller_net;
+               sk = sk_lookup(net, tuple, skb, family, proto);
+       } else {
                net = get_net_ns_by_id(caller_net, netns_id);
                if (unlikely(!net))
                        goto out;
                sk = sk_lookup(net, tuple, skb, family, proto);
                put_net(net);
-       } else {
-               net = caller_net;
-               sk = sk_lookup(net, tuple, skb, family, proto);
        }
 
        if (sk)
@@ -5435,8 +5436,8 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
                if (size != size_default)
                        return false;
                break;
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
-               if (size != sizeof(struct bpf_flow_keys *))
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+               if (size != sizeof(__u64))
                        return false;
                break;
        default:
@@ -5464,7 +5465,7 @@ static bool sk_filter_is_valid_access(int off, int size,
        case bpf_ctx_range(struct __sk_buff, data):
        case bpf_ctx_range(struct __sk_buff, data_meta):
        case bpf_ctx_range(struct __sk_buff, data_end):
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
                return false;
        }
@@ -5489,7 +5490,7 @@ static bool cg_skb_is_valid_access(int off, int size,
        switch (off) {
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range(struct __sk_buff, data_meta):
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                return false;
        case bpf_ctx_range(struct __sk_buff, data):
        case bpf_ctx_range(struct __sk_buff, data_end):
@@ -5530,7 +5531,7 @@ static bool lwt_is_valid_access(int off, int size,
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
        case bpf_ctx_range(struct __sk_buff, data_meta):
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                return false;
        }
 
@@ -5756,7 +5757,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
        case bpf_ctx_range(struct __sk_buff, data_end):
                info->reg_type = PTR_TO_PACKET_END;
                break;
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
                return false;
        }
@@ -5958,7 +5959,7 @@ static bool sk_skb_is_valid_access(int off, int size,
        switch (off) {
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range(struct __sk_buff, data_meta):
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                return false;
        }
 
@@ -6039,7 +6040,7 @@ static bool flow_dissector_is_valid_access(int off, int size,
        case bpf_ctx_range(struct __sk_buff, data_end):
                info->reg_type = PTR_TO_PACKET_END;
                break;
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                info->reg_type = PTR_TO_FLOW_KEYS;
                break;
        case bpf_ctx_range(struct __sk_buff, tc_classid):
index 33d9227a8b8077a8cf6edbcaaa9f5b92d4fee48e..7819f7804eeb80fcac76f32ee1aacdc0a180d6ca 100644 (file)
@@ -3800,6 +3800,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
 {
        int err;
 
+       if (dev->type != ARPHRD_ETHER)
+               return -EINVAL;
+
        netif_addr_lock_bh(dev);
        err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
        if (err)
index c90ee3227deab6281c3df301d14bb6e81a6e1011..5e8c9bef78bd2ec26b405d1de6e9a74845492e8b 100644 (file)
@@ -158,8 +158,31 @@ static void dsa_master_ethtool_teardown(struct net_device *dev)
        cpu_dp->orig_ethtool_ops = NULL;
 }
 
+static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
+                           char *buf)
+{
+       struct net_device *dev = to_net_dev(d);
+       struct dsa_port *cpu_dp = dev->dsa_ptr;
+
+       return sprintf(buf, "%s\n",
+                      dsa_tag_protocol_to_str(cpu_dp->tag_ops));
+}
+static DEVICE_ATTR_RO(tagging);
+
+static struct attribute *dsa_slave_attrs[] = {
+       &dev_attr_tagging.attr,
+       NULL
+};
+
+static const struct attribute_group dsa_group = {
+       .name   = "dsa",
+       .attrs  = dsa_slave_attrs,
+};
+
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
+       int ret;
+
        /* If we use a tagging format that doesn't have an ethertype
         * field, make sure that all packets from this point on get
         * sent to the tag format's receive function.
@@ -168,11 +191,20 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 
        dev->dsa_ptr = cpu_dp;
 
-       return dsa_master_ethtool_setup(dev);
+       ret = dsa_master_ethtool_setup(dev);
+       if (ret)
+               return ret;
+
+       ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
+       if (ret)
+               dsa_master_ethtool_teardown(dev);
+
+       return ret;
 }
 
 void dsa_master_teardown(struct net_device *dev)
 {
+       sysfs_remove_group(&dev->dev.kobj, &dsa_group);
        dsa_master_ethtool_teardown(dev);
 
        dev->dsa_ptr = NULL;
index 7d0c19e7edcf00c556fca25dc8fcdc121d82a79a..aec78f5aca72d197038246fe5462b05c7394a004 100644 (file)
@@ -1058,27 +1058,6 @@ static struct device_type dsa_type = {
        .name   = "dsa",
 };
 
-static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
-                           char *buf)
-{
-       struct net_device *dev = to_net_dev(d);
-       struct dsa_port *dp = dsa_slave_to_port(dev);
-
-       return sprintf(buf, "%s\n",
-                      dsa_tag_protocol_to_str(dp->cpu_dp->tag_ops));
-}
-static DEVICE_ATTR_RO(tagging);
-
-static struct attribute *dsa_slave_attrs[] = {
-       &dev_attr_tagging.attr,
-       NULL
-};
-
-static const struct attribute_group dsa_group = {
-       .name   = "dsa",
-       .attrs  = dsa_slave_attrs,
-};
-
 static void dsa_slave_phylink_validate(struct net_device *dev,
                                       unsigned long *supported,
                                       struct phylink_link_state *state)
@@ -1374,14 +1353,8 @@ int dsa_slave_create(struct dsa_port *port)
                goto out_phy;
        }
 
-       ret = sysfs_create_group(&slave_dev->dev.kobj, &dsa_group);
-       if (ret)
-               goto out_unreg;
-
        return 0;
 
-out_unreg:
-       unregister_netdev(slave_dev);
 out_phy:
        rtnl_lock();
        phylink_disconnect_phy(p->dp->pl);
@@ -1405,7 +1378,6 @@ void dsa_slave_destroy(struct net_device *slave_dev)
        rtnl_unlock();
 
        dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
-       sysfs_remove_group(&slave_dev->dev.kobj, &dsa_group);
        unregister_netdev(slave_dev);
        phylink_destroy(dp->pl);
        free_percpu(p->stats64);
index d6ee343fdb8647ea96240d017b72aef2f6790299..aa0b22697998ab60f0013bf65cea9cef2913f61f 100644 (file)
@@ -515,6 +515,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
        struct rb_node *rbn;
        int len;
        int ihlen;
+       int delta;
        int err;
        u8 ecn;
 
@@ -556,10 +557,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
        if (len > 65535)
                goto out_oversize;
 
+       delta = - head->truesize;
+
        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                goto out_nomem;
 
+       delta += head->truesize;
+       if (delta)
+               add_frag_mem_limit(qp->q.net, delta);
+
        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments. */
index 35a786c0aaa064888540774863cd08e1f21f12a8..e609b08c9df4f562f01c1a4aba97fc54f6f97694 100644 (file)
@@ -547,7 +547,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
        list_for_each_entry_safe(skb, next, head, list) {
                struct dst_entry *dst;
 
-               list_del(&skb->list);
+               skb_list_del_init(skb);
                /* if ingress device is enslaved to an L3 master device pass the
                 * skb to its handler for processing
                 */
@@ -594,7 +594,7 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
                struct net_device *dev = skb->dev;
                struct net *net = dev_net(dev);
 
-               list_del(&skb->list);
+               skb_list_del_init(skb);
                skb = ip_rcv_core(skb, net);
                if (skb == NULL)
                        continue;
index 3f510cad0b3ec884aeb23f58aaa597ec98c82c88..d1676d8a6ed70fbe050709a16a650df35a1f4d87 100644 (file)
@@ -1904,7 +1904,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
  * This algorithm is from John Heffner.
  */
 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
-                                bool *is_cwnd_limited, u32 max_segs)
+                                bool *is_cwnd_limited,
+                                bool *is_rwnd_limited,
+                                u32 max_segs)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 age, send_win, cong_win, limit, in_flight;
@@ -1912,9 +1914,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        struct sk_buff *head;
        int win_divisor;
 
-       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
-               goto send_now;
-
        if (icsk->icsk_ca_state >= TCP_CA_Recovery)
                goto send_now;
 
@@ -1973,10 +1972,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        if (age < (tp->srtt_us >> 4))
                goto send_now;
 
-       /* Ok, it looks like it is advisable to defer. */
+       /* Ok, it looks like it is advisable to defer.
+        * Three cases are tracked :
+        * 1) We are cwnd-limited
+        * 2) We are rwnd-limited
+        * 3) We are application limited.
+        */
+       if (cong_win < send_win) {
+               if (cong_win <= skb->len) {
+                       *is_cwnd_limited = true;
+                       return true;
+               }
+       } else {
+               if (send_win <= skb->len) {
+                       *is_rwnd_limited = true;
+                       return true;
+               }
+       }
 
-       if (cong_win < send_win && cong_win <= skb->len)
-               *is_cwnd_limited = true;
+       /* If this packet won't get more data, do not wait. */
+       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+               goto send_now;
 
        return true;
 
@@ -2356,7 +2372,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                } else {
                        if (!push_one &&
                            tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
-                                                max_segs))
+                                                &is_rwnd_limited, max_segs))
                                break;
                }
 
@@ -2494,15 +2510,18 @@ void tcp_send_loss_probe(struct sock *sk)
                goto rearm_timer;
        }
        skb = skb_rb_last(&sk->tcp_rtx_queue);
+       if (unlikely(!skb)) {
+               WARN_ONCE(tp->packets_out,
+                         "invalid inflight: %u state %u cwnd %u mss %d\n",
+                         tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
+               inet_csk(sk)->icsk_pending = 0;
+               return;
+       }
 
        /* At most one outstanding TLP retransmission. */
        if (tp->tlp_high_seq)
                goto rearm_timer;
 
-       /* Retransmit last segment. */
-       if (WARN_ON(!skb))
-               goto rearm_timer;
-
        if (skb_still_in_host_queue(sk, skb))
                goto rearm_timer;
 
@@ -2920,7 +2939,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
                TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
                trace_tcp_retransmit_skb(sk, skb);
        } else if (err != -EBUSY) {
-               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+               NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
        }
        return err;
 }
index 091c53925e4da6b2b154d166682a0ac0aefd7ecb..f87dbc78b6bcb85e12b72bdf57679a36440bb5bf 100644 (file)
@@ -378,7 +378,7 @@ static void tcp_probe_timer(struct sock *sk)
                        return;
        }
 
-       if (icsk->icsk_probes_out > max_probes) {
+       if (icsk->icsk_probes_out >= max_probes) {
 abort:         tcp_write_err(sk);
        } else {
                /* Only send another probe if we didn't close things up. */
@@ -484,11 +484,12 @@ void tcp_retransmit_timer(struct sock *sk)
                goto out_reset_timer;
        }
 
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
        if (tcp_write_timeout(sk))
                goto out;
 
        if (icsk->icsk_retransmits == 0) {
-               int mib_idx;
+               int mib_idx = 0;
 
                if (icsk->icsk_ca_state == TCP_CA_Recovery) {
                        if (tcp_is_sack(tp))
@@ -503,10 +504,9 @@ void tcp_retransmit_timer(struct sock *sk)
                                mib_idx = LINUX_MIB_TCPSACKFAILURES;
                        else
                                mib_idx = LINUX_MIB_TCPRENOFAILURES;
-               } else {
-                       mib_idx = LINUX_MIB_TCPTIMEOUTS;
                }
-               __NET_INC_STATS(sock_net(sk), mib_idx);
+               if (mib_idx)
+                       __NET_INC_STATS(sock_net(sk), mib_idx);
        }
 
        tcp_enter_loss(sk);
index 96577e742afd496eaed410395620be5a8e24430f..c1d85830c906f68bdd2310e411b44dca98e3db72 100644 (file)
@@ -95,7 +95,7 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
        list_for_each_entry_safe(skb, next, head, list) {
                struct dst_entry *dst;
 
-               list_del(&skb->list);
+               skb_list_del_init(skb);
                /* if ingress device is enslaved to an L3 master device pass the
                 * skb to its handler for processing
                 */
@@ -296,7 +296,7 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
                struct net_device *dev = skb->dev;
                struct net *net = dev_net(dev);
 
-               list_del(&skb->list);
+               skb_list_del_init(skb);
                skb = ip6_rcv_core(skb, dev, net);
                if (skb == NULL)
                        continue;
index 827a3f5ff3bbdff2e66e09bb90b5b2195b9ef297..fcd3c66ded1620d0d320fd7b1753f2734d1dfa46 100644 (file)
@@ -195,37 +195,37 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
        const struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *first_hop = &fl6->daddr;
        struct dst_entry *dst = skb_dst(skb);
+       unsigned int head_room;
        struct ipv6hdr *hdr;
        u8  proto = fl6->flowi6_proto;
        int seg_len = skb->len;
        int hlimit = -1;
        u32 mtu;
 
-       if (opt) {
-               unsigned int head_room;
+       head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
+       if (opt)
+               head_room += opt->opt_nflen + opt->opt_flen;
 
-               /* First: exthdrs may take lots of space (~8K for now)
-                  MAX_HEADER is not enough.
-                */
-               head_room = opt->opt_nflen + opt->opt_flen;
-               seg_len += head_room;
-               head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
-
-               if (skb_headroom(skb) < head_room) {
-                       struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
-                       if (!skb2) {
-                               IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-                                             IPSTATS_MIB_OUTDISCARDS);
-                               kfree_skb(skb);
-                               return -ENOBUFS;
-                       }
-                       if (skb->sk)
-                               skb_set_owner_w(skb2, skb->sk);
-                       consume_skb(skb);
-                       skb = skb2;
+       if (unlikely(skb_headroom(skb) < head_room)) {
+               struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
+               if (!skb2) {
+                       IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                                     IPSTATS_MIB_OUTDISCARDS);
+                       kfree_skb(skb);
+                       return -ENOBUFS;
                }
+               if (skb->sk)
+                       skb_set_owner_w(skb2, skb->sk);
+               consume_skb(skb);
+               skb = skb2;
+       }
+
+       if (opt) {
+               seg_len += opt->opt_nflen + opt->opt_flen;
+
                if (opt->opt_flen)
                        ipv6_push_frag_opts(skb, opt, &proto);
+
                if (opt->opt_nflen)
                        ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
                                             &fl6->saddr);
index d219979c3e529c32e029865debc788109d05ad83..181da2c40f9a98f99b0491dfcc589aec73ad977e 100644 (file)
@@ -341,7 +341,7 @@ static bool
 nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_device *dev)
 {
        struct sk_buff *fp, *head = fq->q.fragments;
-       int    payload_len;
+       int    payload_len, delta;
        u8 ecn;
 
        inet_frag_kill(&fq->q);
@@ -363,10 +363,16 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_devic
                return false;
        }
 
+       delta = - head->truesize;
+
        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                return false;
 
+       delta += head->truesize;
+       if (delta)
+               add_frag_mem_limit(fq->q.net, delta);
+
        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments. */
index 5c3c9271309620b6ca8bf0a7d7e10459bea8dc40..aa26c45486d94ab2f2f9f443b837642d8b582f83 100644 (file)
@@ -281,7 +281,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 {
        struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
        struct sk_buff *fp, *head = fq->q.fragments;
-       int    payload_len;
+       int    payload_len, delta;
        unsigned int nhoff;
        int sum_truesize;
        u8 ecn;
@@ -322,10 +322,16 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
        if (payload_len > IPV6_MAXPLEN)
                goto out_oversize;
 
+       delta = - head->truesize;
+
        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                goto out_oom;
 
+       delta += head->truesize;
+       if (delta)
+               add_frag_mem_limit(fq->q.net, delta);
+
        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments. */
index a8854dd3e9c5ef64a7a480bb6ff891fac0e6d1ea..8181ee7e1e27051040bd3bbce9d6a228632a5297 100644 (file)
@@ -347,6 +347,7 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                struct ipv6hdr *hdr = ipv6_hdr(skb);
                struct flowi6 fl6;
 
+               memset(&fl6, 0, sizeof(fl6));
                fl6.daddr = hdr->daddr;
                fl6.saddr = hdr->saddr;
                fl6.flowlabel = ip6_flowinfo(hdr);
index 51622333d4602a60fa39a877d7fbb721ada64bf1..818aa006034950785768d80a3f5ba8ad5f2f7f0f 100644 (file)
@@ -2891,7 +2891,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
 
        len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len +
              beacon->proberesp_ies_len + beacon->assocresp_ies_len +
-             beacon->probe_resp_len;
+             beacon->probe_resp_len + beacon->lci_len + beacon->civicloc_len;
 
        new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL);
        if (!new_beacon)
@@ -2934,8 +2934,9 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
                memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
                pos += beacon->probe_resp_len;
        }
-       if (beacon->ftm_responder)
-               new_beacon->ftm_responder = beacon->ftm_responder;
+
+       /* might copy -1, meaning no changes requested */
+       new_beacon->ftm_responder = beacon->ftm_responder;
        if (beacon->lci) {
                new_beacon->lci_len = beacon->lci_len;
                new_beacon->lci = pos;
index 5836ddeac9e34ecd2aa6e51363679d2cd11f266d..5f3c81e705c7df9ea7ff7c69eb3b6aff00df17ee 100644 (file)
@@ -1015,6 +1015,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        if (local->open_count == 0)
                ieee80211_clear_tx_pending(local);
 
+       sdata->vif.bss_conf.beacon_int = 0;
+
        /*
         * If the interface goes down while suspended, presumably because
         * the device was unplugged and that happens before our resume,
index d2bc8d57c87eb40b943873732e5705514fcd2fc2..bcf5ffc1567a4ff1c6054f7aecc0aa28dfd2c3fa 100644 (file)
@@ -2766,6 +2766,7 @@ static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct sta_info *sta;
+       bool result = true;
 
        sdata_info(sdata, "authenticated\n");
        ifmgd->auth_data->done = true;
@@ -2778,15 +2779,18 @@ static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata,
        sta = sta_info_get(sdata, bssid);
        if (!sta) {
                WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid);
-               return false;
+               result = false;
+               goto out;
        }
        if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
                sdata_info(sdata, "failed moving %pM to auth\n", bssid);
-               return false;
+               result = false;
+               goto out;
        }
-       mutex_unlock(&sdata->local->sta_mtx);
 
-       return true;
+out:
+       mutex_unlock(&sdata->local->sta_mtx);
+       return result;
 }
 
 static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
index 3bd3b57697970f8ad2c756d1ab69a360b465ff38..428f7ad5f9b59f7964405c1c534d2b1a130fe25e 100644 (file)
@@ -1403,6 +1403,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
                return RX_CONTINUE;
 
        if (ieee80211_is_ctl(hdr->frame_control) ||
+           ieee80211_is_nullfunc(hdr->frame_control) ||
            ieee80211_is_qos_nullfunc(hdr->frame_control) ||
            is_multicast_ether_addr(hdr->addr1))
                return RX_CONTINUE;
@@ -3063,7 +3064,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                        cfg80211_sta_opmode_change_notify(sdata->dev,
                                                          rx->sta->addr,
                                                          &sta_opmode,
-                                                         GFP_KERNEL);
+                                                         GFP_ATOMIC);
                        goto handled;
                }
                case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
@@ -3100,7 +3101,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                        cfg80211_sta_opmode_change_notify(sdata->dev,
                                                          rx->sta->addr,
                                                          &sta_opmode,
-                                                         GFP_KERNEL);
+                                                         GFP_ATOMIC);
                        goto handled;
                }
                default:
index aa4afbf0abaf1727b62fe1e6a9ce9605d549b856..a794ca7290001a778d7ef5fde62a7c32b7c110b1 100644 (file)
@@ -964,6 +964,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
                        /* Track when last TDLS packet was ACKed */
                        if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
                                sta->status_stats.last_tdls_pkt_time = jiffies;
+               } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+                       return;
                } else {
                        ieee80211_lost_packet(sta, info);
                }
index e0ccee23fbcdb209a6f7f5704c2aea2d2ef74782..1f536ba573b4852ef18f1a80129e56c4961be520 100644 (file)
@@ -439,8 +439,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
        if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
                info->hw_queue = tx->sdata->vif.cab_queue;
 
-       /* no stations in PS mode */
-       if (!atomic_read(&ps->num_sta_ps))
+       /* no stations in PS mode and no buffered packets */
+       if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
                return TX_CONTINUE;
 
        info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
index a4660c48ff0149ad1798cd646d4edb2ed5c7770f..cd94f925495a5ef6b6d2192961a0c97d6ac1764f 100644 (file)
@@ -1166,7 +1166,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
                                &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                        if (err) {
                                net_warn_ratelimited("openvswitch: zone: %u "
-                                       "execeeds conntrack limit\n",
+                                       "exceeds conntrack limit\n",
                                        info->zone.id);
                                return err;
                        }
index 37c9b8f0e10f07c65b48087cbc5ef1335eecb850..ec8ec55e0fe879a35ff17f6cc6542f5efa451886 100644 (file)
@@ -85,7 +85,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
                               int ovr, int bind, bool rtnl_held,
                               struct netlink_ext_ack *extack)
 {
-       int ret = 0, err;
+       int ret = 0, tcfp_result = TC_ACT_OK, err, size;
        struct nlattr *tb[TCA_POLICE_MAX + 1];
        struct tc_police *parm;
        struct tcf_police *police;
@@ -93,7 +93,6 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
        struct tc_action_net *tn = net_generic(net, police_net_id);
        struct tcf_police_params *new;
        bool exists = false;
-       int size;
 
        if (nla == NULL)
                return -EINVAL;
@@ -160,6 +159,16 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
                goto failure;
        }
 
+       if (tb[TCA_POLICE_RESULT]) {
+               tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
+               if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
+                       NL_SET_ERR_MSG(extack,
+                                      "goto chain not allowed on fallback");
+                       err = -EINVAL;
+                       goto failure;
+               }
+       }
+
        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (unlikely(!new)) {
                err = -ENOMEM;
@@ -167,6 +176,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
        }
 
        /* No failure allowed after this point */
+       new->tcfp_result = tcfp_result;
        new->tcfp_mtu = parm->mtu;
        if (!new->tcfp_mtu) {
                new->tcfp_mtu = ~0;
@@ -196,16 +206,6 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
        if (tb[TCA_POLICE_AVRATE])
                new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
 
-       if (tb[TCA_POLICE_RESULT]) {
-               new->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
-               if (TC_ACT_EXT_CMP(new->tcfp_result, TC_ACT_GOTO_CHAIN)) {
-                       NL_SET_ERR_MSG(extack,
-                                      "goto chain not allowed on fallback");
-                       err = -EINVAL;
-                       goto failure;
-               }
-       }
-
        spin_lock_bh(&police->tcf_lock);
        spin_lock_bh(&police->tcfp_lock);
        police->tcfp_t_c = ktime_get_ns();
index c6c327874abcc9974adf2675c8326a967c6df6f6..71312d7bd8f490c9b8200ccaac59ea0cd0031da6 100644 (file)
@@ -1238,18 +1238,16 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        if (err)
                goto errout_idr;
 
-       if (!tc_skip_sw(fnew->flags)) {
-               if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
-                       err = -EEXIST;
-                       goto errout_mask;
-               }
-
-               err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
-                                            fnew->mask->filter_ht_params);
-               if (err)
-                       goto errout_mask;
+       if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
+               err = -EEXIST;
+               goto errout_mask;
        }
 
+       err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
+                                    fnew->mask->filter_ht_params);
+       if (err)
+               goto errout_mask;
+
        if (!tc_skip_hw(fnew->flags)) {
                err = fl_hw_replace_filter(tp, fnew, extack);
                if (err)
@@ -1303,9 +1301,8 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
        struct cls_fl_head *head = rtnl_dereference(tp->root);
        struct cls_fl_filter *f = arg;
 
-       if (!tc_skip_sw(f->flags))
-               rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
-                                      f->mask->filter_ht_params);
+       rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
+                              f->mask->filter_ht_params);
        __fl_delete(tp, f, extack);
        *last = list_empty(&head->masks);
        return 0;
index 2c38e3d0792468162ee0dc4137f1400160ab9276..22cd46a600576f286803536d45875cd9d537cdca 100644 (file)
@@ -431,6 +431,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        int count = 1;
        int rc = NET_XMIT_SUCCESS;
 
+       /* Do not fool qdisc_drop_all() */
+       skb->prev = NULL;
+
        /* Random duplication */
        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
                ++count;
index 6a28b96e779e68d5259138e11da17c1216e18b71..914750b819b2661986a1dca9d0b049a68d020e67 100644 (file)
@@ -118,9 +118,6 @@ static struct sctp_association *sctp_association_init(
        asoc->flowlabel = sp->flowlabel;
        asoc->dscp = sp->dscp;
 
-       /* Initialize default path MTU. */
-       asoc->pathmtu = sp->pathmtu;
-
        /* Set association default SACK delay */
        asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
        asoc->sackfreq = sp->sackfreq;
@@ -252,6 +249,10 @@ static struct sctp_association *sctp_association_init(
                             0, gfp))
                goto fail_init;
 
+       /* Initialize default path MTU. */
+       asoc->pathmtu = sp->pathmtu;
+       sctp_assoc_update_frag_point(asoc);
+
        /* Assume that peer would support both address types unless we are
         * told otherwise.
         */
@@ -434,7 +435,7 @@ static void sctp_association_destroy(struct sctp_association *asoc)
 
        WARN_ON(atomic_read(&asoc->rmem_alloc));
 
-       kfree(asoc);
+       kfree_rcu(asoc, rcu);
        SCTP_DBG_OBJCNT_DEC(assoc);
 }
 
index ce8087846f05947d2990f6b6deebcadc7c255ac1..d2048de86e7c267d11b6fadd16535ddd7d8fc1b4 100644 (file)
@@ -191,6 +191,12 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
         * the packet
         */
        max_data = asoc->frag_point;
+       if (unlikely(!max_data)) {
+               max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk),
+                                              sctp_datachk_len(&asoc->stream));
+               pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%Zu)",
+                                   __func__, asoc, max_data);
+       }
 
        /* If the the peer requested that we authenticate DATA chunks
         * we need to account for bundling of the AUTH chunks along with
index 4a4fd19712552b9ac3429897cf9f78e65db6214d..f4ac6c592e1396e136311defe312be22ece411d8 100644 (file)
@@ -2462,6 +2462,9 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
                             asoc->c.sinit_max_instreams, gfp))
                goto clean_up;
 
+       /* Update frag_point when stream_interleave may get changed. */
+       sctp_assoc_update_frag_point(asoc);
+
        if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
                goto clean_up;
 
index bf618d1b41fd20d0799726a4f3bc9081f4e48599..b8cebd5a87e5c3571cbc184324ed483d3e6eb9bd 100644 (file)
@@ -3324,8 +3324,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
                __u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) :
                                 sizeof(struct sctp_data_chunk);
 
-               min_len = sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT,
-                                          datasize);
+               min_len = sctp_min_frag_point(sp, datasize);
                max_len = SCTP_MAX_CHUNK_LEN - datasize;
 
                if (val < min_len || val > max_len)
index 5d3f252659f191fe040452e6b55a862d850450e8..ba765473d1f0662ef79f6be24b499bd4c8a29509 100644 (file)
@@ -1791,6 +1791,7 @@ priv_release_snd_buf(struct rpc_rqst *rqstp)
        for (i=0; i < rqstp->rq_enc_pages_num; i++)
                __free_page(rqstp->rq_enc_pages[i]);
        kfree(rqstp->rq_enc_pages);
+       rqstp->rq_release_snd_buf = NULL;
 }
 
 static int
@@ -1799,6 +1800,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
        struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
        int first, last, i;
 
+       if (rqstp->rq_release_snd_buf)
+               rqstp->rq_release_snd_buf(rqstp);
+
        if (snd_buf->page_len == 0) {
                rqstp->rq_enc_pages_num = 0;
                return 0;
index ae3b8145da35a236cb24a7aff544b3f99d67547d..c6782aa475257bb510402a2172c8d4f55706a79a 100644 (file)
@@ -1915,6 +1915,13 @@ call_connect_status(struct rpc_task *task)
        struct rpc_clnt *clnt = task->tk_client;
        int status = task->tk_status;
 
+       /* Check if the task was already transmitted */
+       if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
+               xprt_end_transmit(task);
+               task->tk_action = call_transmit_status;
+               return;
+       }
+
        dprint_status(task);
 
        trace_rpc_connect_status(task);
@@ -2302,6 +2309,7 @@ out_retry:
        task->tk_status = 0;
        /* Note: rpc_verify_header() may have freed the RPC slot */
        if (task->tk_rqstp == req) {
+               xdr_free_bvec(&req->rq_rcv_buf);
                req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
                if (task->tk_client->cl_discrtry)
                        xprt_conditional_disconnect(req->rq_xprt,
index 86bea4520c4d1fb3db7249bbd69f87721d02e6b5..ce927002862a675a9f1169d12fbeb6999984a1c6 100644 (file)
@@ -826,8 +826,15 @@ void xprt_connect(struct rpc_task *task)
                        return;
                if (xprt_test_and_set_connecting(xprt))
                        return;
-               xprt->stat.connect_start = jiffies;
-               xprt->ops->connect(xprt, task);
+               /* Race breaker */
+               if (!xprt_connected(xprt)) {
+                       xprt->stat.connect_start = jiffies;
+                       xprt->ops->connect(xprt, task);
+               } else {
+                       xprt_clear_connecting(xprt);
+                       task->tk_status = 0;
+                       rpc_wake_up_queued_task(&xprt->pending, task);
+               }
        }
        xprt_release_write(xprt, task);
 }
@@ -1623,6 +1630,8 @@ xprt_request_init(struct rpc_task *task)
        req->rq_snd_buf.buflen = 0;
        req->rq_rcv_buf.len = 0;
        req->rq_rcv_buf.buflen = 0;
+       req->rq_snd_buf.bvec = NULL;
+       req->rq_rcv_buf.bvec = NULL;
        req->rq_release_snd_buf = NULL;
        xprt_reset_majortimeo(req);
        dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
index ae77c71c1f640c32fd8d673934be412da9da0189..8a5e823e0b339b1998ff21b9cac814de213a2b23 100644 (file)
@@ -330,18 +330,16 @@ xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
 {
        size_t i,n;
 
-       if (!(buf->flags & XDRBUF_SPARSE_PAGES))
+       if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
                return want;
-       if (want > buf->page_len)
-               want = buf->page_len;
        n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
        for (i = 0; i < n; i++) {
                if (buf->pages[i])
                        continue;
                buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
                if (!buf->pages[i]) {
-                       buf->page_len = (i * PAGE_SIZE) - buf->page_base;
-                       return buf->page_len;
+                       i *= PAGE_SIZE;
+                       return i > buf->page_base ? i - buf->page_base : 0;
                }
        }
        return want;
@@ -378,8 +376,8 @@ static ssize_t
 xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
                size_t count)
 {
-       struct kvec kvec = { 0 };
-       return xs_read_kvec(sock, msg, flags | MSG_TRUNC, &kvec, count, 0);
+       iov_iter_discard(&msg->msg_iter, READ, count);
+       return sock_recvmsg(sock, msg, flags);
 }
 
 static ssize_t
@@ -398,16 +396,17 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
                if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
                        goto out;
                if (ret != want)
-                       goto eagain;
+                       goto out;
                seek = 0;
        } else {
                seek -= buf->head[0].iov_len;
                offset += buf->head[0].iov_len;
        }
-       if (seek < buf->page_len) {
-               want = xs_alloc_sparse_pages(buf,
-                               min_t(size_t, count - offset, buf->page_len),
-                               GFP_NOWAIT);
+
+       want = xs_alloc_sparse_pages(buf,
+                       min_t(size_t, count - offset, buf->page_len),
+                       GFP_NOWAIT);
+       if (seek < want) {
                ret = xs_read_bvec(sock, msg, flags, buf->bvec,
                                xdr_buf_pagecount(buf),
                                want + buf->page_base,
@@ -418,12 +417,13 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
                if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
                        goto out;
                if (ret != want)
-                       goto eagain;
+                       goto out;
                seek = 0;
        } else {
-               seek -= buf->page_len;
-               offset += buf->page_len;
+               seek -= want;
+               offset += want;
        }
+
        if (seek < buf->tail[0].iov_len) {
                want = min_t(size_t, count - offset, buf->tail[0].iov_len);
                ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
@@ -433,17 +433,13 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
                if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
                        goto out;
                if (ret != want)
-                       goto eagain;
+                       goto out;
        } else
                offset += buf->tail[0].iov_len;
        ret = -EMSGSIZE;
-       msg->msg_flags |= MSG_TRUNC;
 out:
        *read = offset - seek_init;
        return ret;
-eagain:
-       ret = -EAGAIN;
-       goto out;
 sock_err:
        offset += seek;
        goto out;
@@ -486,19 +482,20 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
        if (transport->recv.offset == transport->recv.len) {
                if (xs_read_stream_request_done(transport))
                        msg->msg_flags |= MSG_EOR;
-               return transport->recv.copied;
+               return read;
        }
 
        switch (ret) {
+       default:
+               break;
+       case -EFAULT:
        case -EMSGSIZE:
-               return transport->recv.copied;
+               msg->msg_flags |= MSG_TRUNC;
+               return read;
        case 0:
                return -ESHUTDOWN;
-       default:
-               if (ret < 0)
-                       return ret;
        }
-       return -EAGAIN;
+       return ret < 0 ? ret : read;
 }
 
 static size_t
@@ -537,7 +534,7 @@ xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
 
        ret = xs_read_stream_request(transport, msg, flags, req);
        if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
-               xprt_complete_bc_request(req, ret);
+               xprt_complete_bc_request(req, transport->recv.copied);
 
        return ret;
 }
@@ -570,7 +567,7 @@ xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
 
        spin_lock(&xprt->queue_lock);
        if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
-               xprt_complete_rqst(req->rq_task, ret);
+               xprt_complete_rqst(req->rq_task, transport->recv.copied);
        xprt_unpin_rqst(req);
 out:
        spin_unlock(&xprt->queue_lock);
@@ -591,10 +588,8 @@ xs_read_stream(struct sock_xprt *transport, int flags)
                if (ret <= 0)
                        goto out_err;
                transport->recv.offset = ret;
-               if (ret != want) {
-                       ret = -EAGAIN;
-                       goto out_err;
-               }
+               if (transport->recv.offset != want)
+                       return transport->recv.offset;
                transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
                        RPC_FRAGMENT_SIZE_MASK;
                transport->recv.offset -= sizeof(transport->recv.fraghdr);
@@ -602,6 +597,9 @@ xs_read_stream(struct sock_xprt *transport, int flags)
        }
 
        switch (be32_to_cpu(transport->recv.calldir)) {
+       default:
+               msg.msg_flags |= MSG_TRUNC;
+               break;
        case RPC_CALL:
                ret = xs_read_stream_call(transport, &msg, flags);
                break;
@@ -616,6 +614,9 @@ xs_read_stream(struct sock_xprt *transport, int flags)
                goto out_err;
        read += ret;
        if (transport->recv.offset < transport->recv.len) {
+               if (!(msg.msg_flags & MSG_TRUNC))
+                       return read;
+               msg.msg_flags = 0;
                ret = xs_read_discard(transport->sock, &msg, flags,
                                transport->recv.len - transport->recv.offset);
                if (ret <= 0)
@@ -623,7 +624,7 @@ xs_read_stream(struct sock_xprt *transport, int flags)
                transport->recv.offset += ret;
                read += ret;
                if (transport->recv.offset != transport->recv.len)
-                       return -EAGAIN;
+                       return read;
        }
        if (xs_read_stream_request_done(transport)) {
                trace_xs_stream_read_request(transport);
@@ -633,13 +634,7 @@ xs_read_stream(struct sock_xprt *transport, int flags)
        transport->recv.len = 0;
        return read;
 out_err:
-       switch (ret) {
-       case 0:
-       case -ESHUTDOWN:
-               xprt_force_disconnect(&transport->xprt);
-               return -ESHUTDOWN;
-       }
-       return ret;
+       return ret != 0 ? ret : -ESHUTDOWN;
 }
 
 static void xs_stream_data_receive(struct sock_xprt *transport)
@@ -648,12 +643,12 @@ static void xs_stream_data_receive(struct sock_xprt *transport)
        ssize_t ret = 0;
 
        mutex_lock(&transport->recv_mutex);
+       clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
        if (transport->sock == NULL)
                goto out;
-       clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
        for (;;) {
                ret = xs_read_stream(transport, MSG_DONTWAIT);
-               if (ret <= 0)
+               if (ret < 0)
                        break;
                read += ret;
                cond_resched();
@@ -1345,10 +1340,10 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
        int err;
 
        mutex_lock(&transport->recv_mutex);
+       clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
        sk = transport->inet;
        if (sk == NULL)
                goto out;
-       clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
        for (;;) {
                skb = skb_recv_udp(sk, 0, 1, &err);
                if (skb == NULL)
index 12b3edf70a7b91966d4ee524fe10aa9a31bcc26b..1615e503f8e3919ac1eb33cab98445acc00957f9 100644 (file)
@@ -272,11 +272,11 @@ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
 
        p1 = (u8*)(ht_capa);
        p2 = (u8*)(ht_capa_mask);
-       for (i = 0; i<sizeof(*ht_capa); i++)
+       for (i = 0; i < sizeof(*ht_capa); i++)
                p1[i] &= p2[i];
 }
 
-/*  Do a logical ht_capa &= ht_capa_mask.  */
+/*  Do a logical vht_capa &= vht_capa_mask.  */
 void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
                                const struct ieee80211_vht_cap *vht_capa_mask)
 {
index 744b5851bbf9010ea2c2cac867fcbda487c4184b..8d763725498c15fc7474f5ca78802233800ee4c5 100644 (file)
@@ -7870,6 +7870,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
        }
 
        memset(&params, 0, sizeof(params));
+       params.beacon_csa.ftm_responder = -1;
 
        if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
            !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT])
index d536b07582f8c90e9dce435c5e569dfb8d406fe9..f741d8376a463b588231550c4cca661e55d2e6b7 100644 (file)
@@ -642,11 +642,15 @@ static bool cfg80211_is_all_idle(void)
         * All devices must be idle as otherwise if you are actively
         * scanning some new beacon hints could be learned and would
         * count as new regulatory hints.
+        * Also if there is any other active beaconing interface we
+        * need not issue a disconnect hint and reset any info such
+        * as chan dfs state, etc.
         */
        list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
                list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
                        wdev_lock(wdev);
-                       if (wdev->conn || wdev->current_bss)
+                       if (wdev->conn || wdev->current_bss ||
+                           cfg80211_beaconing_iface_active(wdev))
                                is_all_idle = false;
                        wdev_unlock(wdev);
                }
@@ -1171,6 +1175,8 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
 
        cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
                                  rdev->wiphy.ht_capa_mod_mask);
+       cfg80211_oper_and_vht_capa(&connect->vht_capa_mask,
+                                  rdev->wiphy.vht_capa_mod_mask);
 
        if (connkeys && connkeys->def >= 0) {
                int idx;
index ef14d80ca03ee22c5568933e1ff3d970557e59ea..d473bd135da8babc52329982bafc518ffeaf52fb 100644 (file)
@@ -1421,6 +1421,8 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
                                                          ies[pos + ext],
                                                          ext == 2))
                                        pos = skip_ie(ies, ielen, pos);
+                               else
+                                       break;
                        }
                } else {
                        pos = skip_ie(ies, ielen, pos);
index d49aa79b79970d403b5c165d4000b2aa1d493442..5121729b8b631f45d81d3e0332b0d2e4346d3c5d 100644 (file)
@@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb,
        }
 
        len = *skb->data;
-       needed = 1 + (len >> 4) + (len & 0x0f);
+       needed = 1 + ((len >> 4) + (len & 0x0f) + 1) / 2;
 
        if (!pskb_may_pull(skb, needed)) {
                /* packet is too short to hold the addresses it claims
@@ -288,7 +288,7 @@ static struct sock *x25_find_listener(struct x25_address *addr,
        sk_for_each(s, &x25_list)
                if ((!strcmp(addr->x25_addr,
                        x25_sk(s)->source_addr.x25_addr) ||
-                               !strcmp(addr->x25_addr,
+                               !strcmp(x25_sk(s)->source_addr.x25_addr,
                                        null_x25_address.x25_addr)) &&
                                        s->sk_state == TCP_LISTEN) {
                        /*
@@ -688,11 +688,15 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
        }
 
-       len = strlen(addr->sx25_addr.x25_addr);
-       for (i = 0; i < len; i++) {
-               if (!isdigit(addr->sx25_addr.x25_addr[i])) {
-                       rc = -EINVAL;
-                       goto out;
+       /* check for the null_x25_address */
+       if (strcmp(addr->sx25_addr.x25_addr, null_x25_address.x25_addr)) {
+
+               len = strlen(addr->sx25_addr.x25_addr);
+               for (i = 0; i < len; i++) {
+                       if (!isdigit(addr->sx25_addr.x25_addr[i])) {
+                               rc = -EINVAL;
+                               goto out;
+                       }
                }
        }
 
index 3c12cae32001da306a97cae51979118931360056..afb26221d8a8f26f96ec257abf8822bc36e6351a 100644 (file)
@@ -142,6 +142,15 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
                        sk->sk_state_change(sk);
                break;
        }
+       case X25_CALL_REQUEST:
+               /* call collision */
+               x25->causediag.cause      = 0x01;
+               x25->causediag.diagnostic = 0x48;
+
+               x25_write_internal(sk, X25_CLEAR_REQUEST);
+               x25_disconnect(sk, EISCONN, 0x01, 0x48);
+               break;
+
        case X25_CLEAR_REQUEST:
                if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
                        goto out_clear;
index 2f48da98b5d421060da5fc056d1199da22031872..dbd37460c573ec549fe33c7015f77206f4485064 100644 (file)
@@ -363,10 +363,12 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
                                                PASS_POS_INSERT_BEFORE);
 
        /*
-        * The stackleak_cleanup pass should be executed after the
-        * "reload" pass, when the stack frame size is final.
+        * The stackleak_cleanup pass should be executed before the "*free_cfg"
+        * pass. It's the moment when the stack frame size is already final,
+        * function prologues and epilogues are generated, and the
+        * machine-dependent code transformations are not done.
         */
-       PASS_INFO(stackleak_cleanup, "reload", 1, PASS_POS_INSERT_AFTER);
+       PASS_INFO(stackleak_cleanup, "*free_cfg", 1, PASS_POS_INSERT_BEFORE);
 
        if (!plugin_default_version_check(version, &gcc_version)) {
                error(G_("incompatible gcc/plugin versions"));
index 66c90f486af913243a1005571a6c88cce93de76f..818dff1de545fad25669adf1144cbd49e5934067 100644 (file)
@@ -36,6 +36,7 @@
 #include <sound/timer.h>
 #include <sound/minors.h>
 #include <linux/uio.h>
+#include <linux/delay.h>
 
 #include "pcm_local.h"
 
@@ -91,12 +92,12 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
  * and this may lead to a deadlock when the code path takes read sem
  * twice (e.g. one in snd_pcm_action_nonatomic() and another in
  * snd_pcm_stream_lock()).  As a (suboptimal) workaround, let writer to
- * spin until it gets the lock.
+ * sleep until all the readers are completed without blocking by writer.
  */
-static inline void down_write_nonblock(struct rw_semaphore *lock)
+static inline void down_write_nonfifo(struct rw_semaphore *lock)
 {
        while (!down_write_trylock(lock))
-               cond_resched();
+               msleep(1);
 }
 
 #define PCM_LOCK_DEFAULT       0
@@ -1967,7 +1968,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
                res = -ENOMEM;
                goto _nolock;
        }
-       down_write_nonblock(&snd_pcm_link_rwsem);
+       down_write_nonfifo(&snd_pcm_link_rwsem);
        write_lock_irq(&snd_pcm_link_rwlock);
        if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
            substream->runtime->status->state != substream1->runtime->status->state ||
@@ -2014,7 +2015,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
        struct snd_pcm_substream *s;
        int res = 0;
 
-       down_write_nonblock(&snd_pcm_link_rwsem);
+       down_write_nonfifo(&snd_pcm_link_rwsem);
        write_lock_irq(&snd_pcm_link_rwlock);
        if (!snd_pcm_stream_linked(substream)) {
                res = -EALREADY;
@@ -2369,7 +2370,8 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
 
 static void pcm_release_private(struct snd_pcm_substream *substream)
 {
-       snd_pcm_unlink(substream);
+       if (snd_pcm_stream_linked(substream))
+               snd_pcm_unlink(substream);
 }
 
 void snd_pcm_release_substream(struct snd_pcm_substream *substream)
index 0bbdf1a01e7634fc3a9aee3c986444aa9e2b7452..76f03abd15ab766190c4d5739f707d81aa0e2d70 100644 (file)
@@ -2498,6 +2498,10 @@ static const struct pci_device_id azx_ids[] = {
        /* AMD Hudson */
        { PCI_DEVICE(0x1022, 0x780d),
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+       /* AMD Stoney */
+       { PCI_DEVICE(0x1022, 0x157a),
+         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+                        AZX_DCAPS_PM_RUNTIME },
        /* AMD Raven */
        { PCI_DEVICE(0x1022, 0x15e3),
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
index 06f93032d0ccf5a78cc1d41d18ffd5e62d5a0530..8d75597028eebbb9f32e6de7009f101b8c117f4d 100644 (file)
@@ -4988,9 +4988,18 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
                { 0x19, 0x21a11010 }, /* dock mic */
                { }
        };
+       /* Ensure the speaker pin is coupled with DAC NID 0x03; otherwise
+        * the speaker output becomes too low for some reason on Thinkpads with
+        * ALC298 codec
+        */
+       static hda_nid_t preferred_pairs[] = {
+               0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
+               0
+       };
        struct alc_spec *spec = codec->spec;
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->gen.preferred_dacs = preferred_pairs;
                spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
                snd_hda_apply_pincfgs(codec, pincfgs);
        } else if (action == HDA_FIXUP_ACT_INIT) {
@@ -5510,6 +5519,7 @@ enum {
        ALC221_FIXUP_HP_HEADSET_MIC,
        ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
        ALC295_FIXUP_HP_AUTO_MUTE,
+       ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6387,6 +6397,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_auto_mute_via_amp,
        },
+       [ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6401,7 +6420,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+       SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
        SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -7065,6 +7088,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x19, 0x04a11040},
                {0x21, 0x04211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x17, 0x90170110},
+               {0x21, 0x02211020}),
        SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60120},
                {0x14, 0x90170110},
index 2bfe4e80a6b92b467c502d66ddf04d2b64f8154c..a105947eaf55cd64c0d053cd70bfbf0f68f52ee1 100644 (file)
@@ -682,9 +682,12 @@ static int usb_audio_probe(struct usb_interface *intf,
 
  __error:
        if (chip) {
+               /* chip->active is inside the chip->card object,
+                * decrement before memory is possibly returned.
+                */
+               atomic_dec(&chip->active);
                if (!chip->num_interfaces)
                        snd_card_free(chip->card);
-               atomic_dec(&chip->active);
        }
        mutex_unlock(&register_mutex);
        return err;
index 8a945ece98690d96b580da4121657c60a0d9b118..6623cafc94f2c639bcceefe877c57927ac31042b 100644 (file)
@@ -1373,6 +1373,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
 
+       case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
        case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
        case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
        case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */
index 55bc512a18318c8b8745a4f174ba2f85c85c83e9..e4e6e2b3fd84742758a53c97d1c55df009fb2fbc 100644 (file)
@@ -32,7 +32,7 @@ static void btf_dumper_ptr(const void *data, json_writer_t *jw,
 }
 
 static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
-                              const void *data)
+                              __u8 bit_offset, const void *data)
 {
        int actual_type_id;
 
@@ -40,7 +40,7 @@ static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
        if (actual_type_id < 0)
                return actual_type_id;
 
-       return btf_dumper_do_type(d, actual_type_id, 0, data);
+       return btf_dumper_do_type(d, actual_type_id, bit_offset, data);
 }
 
 static void btf_dumper_enum(const void *data, json_writer_t *jw)
@@ -237,7 +237,7 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
        case BTF_KIND_VOLATILE:
        case BTF_KIND_CONST:
        case BTF_KIND_RESTRICT:
-               return btf_dumper_modifier(d, type_id, data);
+               return btf_dumper_modifier(d, type_id, bit_offset, data);
        default:
                jsonw_printf(d->jw, "(unsupported-kind");
                return -EINVAL;
index 852dc17ab47a07f2580ade5f9e4a1130ee779c26..72c453a8bf50ed5cd4a0383997f5727048ce8d60 100644 (file)
@@ -2170,7 +2170,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for TCP socket matching *tuple*, optionally in a child
  *             network namespace *netns*. The return value must be checked,
@@ -2187,12 +2187,14 @@ union bpf_attr {
  *             **sizeof**\ (*tuple*\ **->ipv6**)
  *                     Look for an IPv6 socket.
  *
- *             If the *netns* is zero, then the socket lookup table in the
- *             netns associated with the *ctx* will be used. For the TC hooks,
- *             this in the netns of the device in the skb. For socket hooks,
- *             this in the netns of the socket. If *netns* is non-zero, then
- *             it specifies the ID of the netns relative to the netns
- *             associated with the *ctx*.
+ *             If the *netns* is a negative signed 32-bit integer, then the
+ *             socket lookup table in the netns associated with the *ctx*
+ *             will be used. For the TC hooks, this is the netns of the device
+ *             in the skb. For socket hooks, this is the netns of the socket.
+ *             If *netns* is any other signed 32-bit value greater than or
+ *             equal to zero then it specifies the ID of the netns relative to
+ *             the netns associated with the *ctx*. *netns* values beyond the
+ *             range of 32-bit integers are reserved for future use.
  *
  *             All values for *flags* are reserved for future usage, and must
  *             be left at zero.
@@ -2201,8 +2203,10 @@ union bpf_attr {
  *             **CONFIG_NET** configuration option.
  *     Return
  *             Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *             For sockets with reuseport option, the *struct bpf_sock*
+ *             result is from reuse->socks[] using the hash of the tuple.
  *
- * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for UDP socket matching *tuple*, optionally in a child
  *             network namespace *netns*. The return value must be checked,
@@ -2219,12 +2223,14 @@ union bpf_attr {
  *             **sizeof**\ (*tuple*\ **->ipv6**)
  *                     Look for an IPv6 socket.
  *
- *             If the *netns* is zero, then the socket lookup table in the
- *             netns associated with the *ctx* will be used. For the TC hooks,
- *             this in the netns of the device in the skb. For socket hooks,
- *             this in the netns of the socket. If *netns* is non-zero, then
- *             it specifies the ID of the netns relative to the netns
- *             associated with the *ctx*.
+ *             If the *netns* is a negative signed 32-bit integer, then the
+ *             socket lookup table in the netns associated with the *ctx*
+ *             will be used. For the TC hooks, this is the netns of the device
+ *             in the skb. For socket hooks, this is the netns of the socket.
+ *             If *netns* is any other signed 32-bit value greater than or
+ *             equal to zero then it specifies the ID of the netns relative to
+ *             the netns associated with the *ctx*. *netns* values beyond the
+ *             range of 32-bit integers are reserved for future use.
  *
  *             All values for *flags* are reserved for future usage, and must
  *             be left at zero.
@@ -2233,6 +2239,8 @@ union bpf_attr {
  *             **CONFIG_NET** configuration option.
  *     Return
  *             Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *             For sockets with reuseport option, the *struct bpf_sock*
+ *             result is from reuse->socks[] using the hash of the tuple.
  *
  * int bpf_sk_release(struct bpf_sock *sk)
  *     Description
@@ -2405,6 +2413,9 @@ enum bpf_func_id {
 /* BPF_FUNC_perf_event_output for sk_buff input context. */
 #define BPF_F_CTXLEN_MASK              (0xfffffULL << 32)
 
+/* Current network namespace */
+#define BPF_F_CURRENT_NETNS            (-1L)
+
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
 enum bpf_adj_room_mode {
        BPF_ADJ_ROOM_NET,
@@ -2422,6 +2433,12 @@ enum bpf_lwt_encap_mode {
        BPF_LWT_ENCAP_SEG6_INLINE
 };
 
+#define __bpf_md_ptr(type, name)       \
+union {                                        \
+       type name;                      \
+       __u64 :64;                      \
+} __attribute__((aligned(8)))
+
 /* user accessible mirror of in-kernel sk_buff.
  * new fields can only be added to the end of this structure
  */
@@ -2456,7 +2473,7 @@ struct __sk_buff {
        /* ... here. */
 
        __u32 data_meta;
-       struct bpf_flow_keys *flow_keys;
+       __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
 };
 
 struct bpf_tunnel_key {
@@ -2572,8 +2589,8 @@ enum sk_action {
  * be added to the end of this structure
  */
 struct sk_msg_md {
-       void *data;
-       void *data_end;
+       __bpf_md_ptr(void *, data);
+       __bpf_md_ptr(void *, data_end);
 
        __u32 family;
        __u32 remote_ip4;       /* Stored in network byte order */
@@ -2589,8 +2606,9 @@ struct sk_reuseport_md {
         * Start of directly accessible data. It begins from
         * the tcp/udp header.
         */
-       void *data;
-       void *data_end;         /* End of directly accessible data */
+       __bpf_md_ptr(void *, data);
+       /* End of directly accessible data */
+       __bpf_md_ptr(void *, data_end);
        /*
         * Total length of packet (starting from the tcp/udp header).
         * Note that the directly accessible bytes (data_end - data)
index 01ec04bf91b592e470c26512e744d5e59211a3d4..6c16ac36d482c6b71ecf8419ae1191b545e0b942 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
 #include <linux/libnvdimm.h>
+#include <linux/genalloc.h>
 #include <linux/vmalloc.h>
 #include <linux/device.h>
 #include <linux/module.h>
@@ -215,6 +216,8 @@ struct nfit_test {
 
 static struct workqueue_struct *nfit_wq;
 
+static struct gen_pool *nfit_pool;
+
 static struct nfit_test *to_nfit_test(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -1132,6 +1135,9 @@ static void release_nfit_res(void *data)
        list_del(&nfit_res->list);
        spin_unlock(&nfit_test_lock);
 
+       if (resource_size(&nfit_res->res) >= DIMM_SIZE)
+               gen_pool_free(nfit_pool, nfit_res->res.start,
+                               resource_size(&nfit_res->res));
        vfree(nfit_res->buf);
        kfree(nfit_res);
 }
@@ -1144,7 +1150,7 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
                        GFP_KERNEL);
        int rc;
 
-       if (!buf || !nfit_res)
+       if (!buf || !nfit_res || !*dma)
                goto err;
        rc = devm_add_action(dev, release_nfit_res, nfit_res);
        if (rc)
@@ -1164,6 +1170,8 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
 
        return nfit_res->buf;
  err:
+       if (*dma && size >= DIMM_SIZE)
+               gen_pool_free(nfit_pool, *dma, size);
        if (buf)
                vfree(buf);
        kfree(nfit_res);
@@ -1172,9 +1180,16 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
 
 static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
 {
+       struct genpool_data_align data = {
+               .align = SZ_128M,
+       };
        void *buf = vmalloc(size);
 
-       *dma = (unsigned long) buf;
+       if (size >= DIMM_SIZE)
+               *dma = gen_pool_alloc_algo(nfit_pool, size,
+                               gen_pool_first_fit_align, &data);
+       else
+               *dma = (unsigned long) buf;
        return __test_alloc(t, size, dma, buf);
 }
 
@@ -2839,6 +2854,17 @@ static __init int nfit_test_init(void)
                goto err_register;
        }
 
+       nfit_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
+       if (!nfit_pool) {
+               rc = -ENOMEM;
+               goto err_register;
+       }
+
+       if (gen_pool_add(nfit_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
+               rc = -ENOMEM;
+               goto err_register;
+       }
+
        for (i = 0; i < NUM_NFITS; i++) {
                struct nfit_test *nfit_test;
                struct platform_device *pdev;
@@ -2894,6 +2920,9 @@ static __init int nfit_test_init(void)
        return 0;
 
  err_register:
+       if (nfit_pool)
+               gen_pool_destroy(nfit_pool);
+
        destroy_workqueue(nfit_wq);
        for (i = 0; i < NUM_NFITS; i++)
                if (instances[i])
@@ -2917,6 +2946,8 @@ static __exit void nfit_test_exit(void)
        platform_driver_unregister(&nfit_test_driver);
        nfit_test_teardown();
 
+       gen_pool_destroy(nfit_pool);
+
        for (i = 0; i < NUM_NFITS; i++)
                put_device(&instances[i]->pdev.dev);
        class_destroy(nfit_test_dimm);
index 686e57ce40f430fe25f910d5eda49cf0c8236949..efb6c13ab0debe7e82b5615d9eb0cb49f05ad6fb 100644 (file)
@@ -154,12 +154,12 @@ static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
        (void *) BPF_FUNC_skb_ancestor_cgroup_id;
 static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
-                                            int size, unsigned int netns_id,
+                                            int size, unsigned long long netns_id,
                                             unsigned long long flags) =
        (void *) BPF_FUNC_sk_lookup_tcp;
 static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
-                                            int size, unsigned int netns_id,
+                                            int size, unsigned long long netns_id,
                                             unsigned long long flags) =
        (void *) BPF_FUNC_sk_lookup_udp;
 static int (*bpf_sk_release)(struct bpf_sock *sk) =
index f42b3396d6226dfec5fddf5d3a9a2d13c5c4ffb6..38e1cbaaffdbbcfb48e4b02501eef0f18f9763b0 100644 (file)
@@ -432,11 +432,11 @@ static struct btf_raw_test raw_tests[] = {
                /* const void* */       /* [3] */
                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
                /* typedef const void * const_void_ptr */
-               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
-               /* struct A { */        /* [4] */
+               BTF_TYPEDEF_ENC(NAME_TBD, 3),   /* [4] */
+               /* struct A { */        /* [5] */
                BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)),
                /* const_void_ptr m; */
-               BTF_MEMBER_ENC(NAME_TBD, 3, 0),
+               BTF_MEMBER_ENC(NAME_TBD, 4, 0),
                /* } */
                BTF_END_RAW,
        },
@@ -494,10 +494,10 @@ static struct btf_raw_test raw_tests[] = {
                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0),
                /* const void* */       /* [3] */
                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
-               /* typedef const void * const_void_ptr */       /* [4] */
-               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
-               /* const_void_ptr[4] */ /* [5] */
-               BTF_TYPE_ARRAY_ENC(3, 1, 4),
+               /* typedef const void * const_void_ptr */
+               BTF_TYPEDEF_ENC(NAME_TBD, 3),   /* [4] */
+               /* const_void_ptr[4] */
+               BTF_TYPE_ARRAY_ENC(4, 1, 4),    /* [5] */
                BTF_END_RAW,
        },
        .str_sec = "\0const_void_ptr",
@@ -1292,6 +1292,367 @@ static struct btf_raw_test raw_tests[] = {
        .err_str = "type != 0",
 },
 
+{
+       .descr = "typedef (invalid name, name_off = 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               BTF_TYPEDEF_ENC(0, 1),                          /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__int",
+       .str_sec_size = sizeof("\0__int"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "typedef_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "typedef (invalid name, invalid identifier)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               BTF_TYPEDEF_ENC(NAME_TBD, 1),                   /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__!int",
+       .str_sec_size = sizeof("\0__!int"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "typedef_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "ptr type (invalid name, name_off <> 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1),      /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__int",
+       .str_sec_size = sizeof("\0__int"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "ptr_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "volatile type (invalid name, name_off <> 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 1), /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__int",
+       .str_sec_size = sizeof("\0__int"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "volatile_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "const type (invalid name, name_off <> 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1),    /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__int",
+       .str_sec_size = sizeof("\0__int"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "const_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "restrict type (invalid name, name_off <> 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1),   /* [2] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), 2), /* [3] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__int",
+       .str_sec_size = sizeof("\0__int"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "restrict_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "fwd type (invalid name, name_off = 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0),   /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__skb",
+       .str_sec_size = sizeof("\0__skb"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "fwd_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "fwd type (invalid name, invalid identifier)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0),      /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__!skb",
+       .str_sec_size = sizeof("\0__!skb"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "fwd_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "array type (invalid name, name_off <> 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0),    /* [2] */
+               BTF_ARRAY_ENC(1, 1, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__skb",
+       .str_sec_size = sizeof("\0__skb"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "array_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "struct type (name_off = 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0,
+                            BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),   /* [2] */
+               BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A",
+       .str_sec_size = sizeof("\0A"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "struct_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+},
+
+{
+       .descr = "struct type (invalid name, invalid identifier)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),   /* [2] */
+               BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A!\0B",
+       .str_sec_size = sizeof("\0A!\0B"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "struct_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "struct member (name_off = 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0,
+                            BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),   /* [2] */
+               BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A",
+       .str_sec_size = sizeof("\0A"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "struct_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+},
+
+{
+       .descr = "struct member (invalid name, invalid identifier)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),   /* [2] */
+               BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0B*",
+       .str_sec_size = sizeof("\0A\0B*"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "struct_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "enum type (name_off = 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0,
+                            BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
+                            sizeof(int)),                              /* [2] */
+               BTF_ENUM_ENC(NAME_TBD, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0B",
+       .str_sec_size = sizeof("\0A\0B"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "enum_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+},
+
+{
+       .descr = "enum type (invalid name, invalid identifier)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
+                            sizeof(int)),                              /* [2] */
+               BTF_ENUM_ENC(NAME_TBD, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A!\0B",
+       .str_sec_size = sizeof("\0A!\0B"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "enum_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "enum member (invalid name, name_off = 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0,
+                            BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
+                            sizeof(int)),                              /* [2] */
+               BTF_ENUM_ENC(0, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "",
+       .str_sec_size = sizeof(""),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "enum_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "enum member (invalid name, invalid identifier)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0,
+                            BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
+                            sizeof(int)),                              /* [2] */
+               BTF_ENUM_ENC(NAME_TBD, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A!",
+       .str_sec_size = sizeof("\0A!"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "enum_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
 {
        .descr = "arraymap invalid btf key (a bit field)",
        .raw_types = {
index b745bdc08c2bd841e437831e24459ff51f01c34b..e21cd736c196efcfe4d633f05661a1ddb18b8783 100644 (file)
@@ -72,7 +72,7 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb)
                return TC_ACT_SHOT;
 
        tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
-       sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
        if (sk)
                bpf_sk_release(sk);
        return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
@@ -84,7 +84,7 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb)
        struct bpf_sock_tuple tuple = {};
        struct bpf_sock *sk;
 
-       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        if (sk)
                bpf_sk_release(sk);
        return 0;
@@ -97,7 +97,7 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb)
        struct bpf_sock *sk;
        __u32 family = 0;
 
-       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        if (sk) {
                bpf_sk_release(sk);
                family = sk->family;
@@ -112,7 +112,7 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb)
        struct bpf_sock *sk;
        __u32 family;
 
-       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        if (sk) {
                sk += 1;
                bpf_sk_release(sk);
@@ -127,7 +127,7 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
        struct bpf_sock *sk;
        __u32 family;
 
-       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        sk += 1;
        if (sk)
                bpf_sk_release(sk);
@@ -139,7 +139,7 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb)
 {
        struct bpf_sock_tuple tuple = {};
 
-       bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        return 0;
 }
 
@@ -149,7 +149,7 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb)
        struct bpf_sock_tuple tuple = {};
        struct bpf_sock *sk;
 
-       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        bpf_sk_release(sk);
        bpf_sk_release(sk);
        return 0;
@@ -161,7 +161,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb)
        struct bpf_sock_tuple tuple = {};
        struct bpf_sock *sk;
 
-       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        bpf_sk_release(sk);
        return 0;
 }
@@ -169,7 +169,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb)
 void lookup_no_release(struct __sk_buff *skb)
 {
        struct bpf_sock_tuple tuple = {};
-       bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
 }
 
 SEC("fail_no_release_subcall")
index 550b7e46bf4a4143059cc7ea58643adf11251353..df6f751cc1e81c0bcc1f68e86a5bc57e530a2452 100644 (file)
@@ -8576,7 +8576,7 @@ static struct bpf_test tests[] = {
                        BPF_JMP_IMM(BPF_JA, 0, 0, -7),
                },
                .fixup_map_hash_8b = { 4 },
-               .errstr = "R0 invalid mem access 'inv'",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -10547,7 +10547,7 @@ static struct bpf_test tests[] = {
                "check deducing bounds from const, 5",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
                        BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                        BPF_EXIT_INSN(),
                },
@@ -14230,7 +14230,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 
        reject_from_alignment = fd_prog < 0 &&
                                (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
-                               strstr(bpf_vlog, "Unknown alignment.");
+                               strstr(bpf_vlog, "misaligned");
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        if (reject_from_alignment) {
                printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",