Merge branch 'linux-4.8' of git://github.com/skeggsb/linux into drm-next
author     Dave Airlie <airlied@redhat.com>
           Tue, 2 Aug 2016 01:16:02 +0000 (11:16 +1000)
committer  Dave Airlie <airlied@redhat.com>
           Tue, 2 Aug 2016 01:16:02 +0000 (11:16 +1000)
Runtime PM fixes, plus fbcon and nv30 fixes.
* 'linux-4.8' of git://github.com/skeggsb/linux:
  drm/nouveau/gr/nv3x: fix instobj write offsets in gr setup
  drm/nouveau/acpi: fix lockup with PCIe runtime PM
  drm/nouveau/acpi: check for function 0x1B before using it
  drm/nouveau/acpi: return supported DSM functions
  drm/nouveau/acpi: ensure matching ACPI handle and supported functions
  drm/nouveau/fbcon: fix font width not divisible by 8

707 files changed:
Documentation/gdb-kernel-debugging.txt
Documentation/gpu/drm-internals.rst
Documentation/gpu/drm-kms.rst
Documentation/x86/intel_mpx.txt
Documentation/x86/tlb.txt
Documentation/x86/x86_64/machinecheck
MAINTAINERS
Makefile
arch/arc/Makefile
arch/arc/kernel/stacktrace.c
arch/arm/boot/dts/armada-385-linksys.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a10s.dtsi
arch/arm/boot/dts/sun5i-r8-chip.dts
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/tegra30-beaver.dts
arch/arm/kvm/arm.c
arch/arm/mach-mvebu/Makefile
arch/arm/mach-mvebu/coherency.c
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/ptrace.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/entry.S
arch/arm64/mm/fault.c
arch/m32r/boot/compressed/m32r_sio.c
arch/m68k/coldfire/head.S
arch/m68k/coldfire/m5272.c
arch/m68k/coldfire/pci.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/ifpsp060/src/fpsp.S
arch/m68k/ifpsp060/src/pfpsp.S
arch/m68k/include/asm/dma.h
arch/m68k/include/asm/m525xsim.h
arch/m68k/include/asm/mcfmmu.h
arch/m68k/include/asm/q40_master.h
arch/m68k/mac/iop.c
arch/m68k/math-emu/fp_decode.h
arch/mips/include/asm/pgtable.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/tm.S
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/pgtable-radix.c
arch/s390/include/asm/fpu/api.h
arch/s390/kernel/ipl.c
arch/x86/events/core.c
arch/x86/events/intel/Makefile
arch/x86/events/intel/core.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/pvclock.h
arch/x86/kernel/amd_nb.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/pvclock.c
arch/x86/kvm/lapic.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/kasan_init_64.c
arch/x86/pci/acpi.c
arch/x86/power/hibernate_64.c
arch/x86/power/hibernate_asm_64.S
block/ioprio.c
crypto/asymmetric_keys/mscode_parser.c
crypto/asymmetric_keys/pkcs7_verify.c
crypto/asymmetric_keys/restrict.c
crypto/crypto_user.c
crypto/rsa-pkcs1pad.c
drivers/acpi/acpi_dbg.c
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/nsparse.c
drivers/acpi/ec.c
drivers/acpi/nfit.c
drivers/acpi/nfit.h
drivers/acpi/pci_link.c
drivers/ata/ahci_seattle.c
drivers/ata/libata-core.c
drivers/ata/sata_mv.c
drivers/bcma/bcma_private.h
drivers/block/xen-blkfront.c
drivers/clk/at91/clk-programmable.c
drivers/clk/clk-oxnas.c
drivers/clk/rockchip/clk-cpu.c
drivers/clk/rockchip/clk-mmc-phase.c
drivers/clk/rockchip/clk-rk3399.c
drivers/clk/sunxi/clk-sun4i-display.c
drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
drivers/connector/cn_proc.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpuidle/cpuidle.c
drivers/crypto/qat/qat_common/Makefile
drivers/crypto/ux500/hash/hash_core.c
drivers/crypto/vmx/aes_cbc.c
drivers/crypto/vmx/aes_ctr.c
drivers/crypto/vmx/ppc-xlate.pl
drivers/dma-buf/dma-buf.c
drivers/edac/sb_edac.c
drivers/gpio/Kconfig
drivers/gpio/gpio-sch.c
drivers/gpio/gpio-tegra.c
drivers/gpio/gpiolib-legacy.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
drivers/gpu/drm/amd/amdgpu/atombios_i2c.h
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/fiji_smc.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
drivers/gpu/drm/amd/amdgpu/iceland_smc.c
drivers/gpu/drm/amd/amdgpu/iceland_smum.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h [deleted file]
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/ppsmc.h
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/tonga_smc.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
drivers/gpu/drm/amd/include/cgs_common.h
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
drivers/gpu/drm/amd/powerplay/inc/smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/arc/Kconfig
drivers/gpu/drm/arc/arcpgu_drv.c
drivers/gpu/drm/arm/Kconfig
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/armada/Kconfig
drivers/gpu/drm/ast/Kconfig
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/atmel-hlcdc/Kconfig
drivers/gpu/drm/bochs/Kconfig
drivers/gpu/drm/bochs/bochs_mm.c
drivers/gpu/drm/bridge/parade-ps8622.c
drivers/gpu/drm/cirrus/Kconfig
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_internal.h
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/fsl-dcu/Kconfig
drivers/gpu/drm/gma500/Kconfig
drivers/gpu/drm/gma500/cdv_intel_hdmi.c
drivers/gpu/drm/gma500/cdv_intel_lvds.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/gma500/psb_intel_lvds.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_mocs.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/imx/Kconfig
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/mgag200/Kconfig
drivers/gpu/drm/mgag200/mgag200_main.c
drivers/gpu/drm/nouveau/Kconfig
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
drivers/gpu/drm/omapdrm/Kconfig
drivers/gpu/drm/omapdrm/displays/connector-dvi.c
drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
drivers/gpu/drm/omapdrm/dss/dss-of.c
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/qxl/Kconfig
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/radeon_acpi.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/rcar-du/Kconfig
drivers/gpu/drm/rockchip/Kconfig
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/shmobile/Kconfig
drivers/gpu/drm/sti/sti_compositor.c
drivers/gpu/drm/sun4i/sun4i_crtc.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun4i_framebuffer.c
drivers/gpu/drm/tegra/fb.c
drivers/gpu/drm/tilcdc/Kconfig
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/udl/Kconfig
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vgem/Makefile
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/vgem/vgem_drv.h
drivers/gpu/drm/vgem/vgem_fence.c [new file with mode: 0644]
drivers/gpu/drm/virtio/Kconfig
drivers/gpu/drm/virtio/virtgpu_display.c
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/hid/hid-multitouch.c
drivers/i2c/busses/i2c-qup.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-boardinfo.c
drivers/i2c/muxes/i2c-mux-reg.c
drivers/iio/accel/kxsd9.c
drivers/iio/adc/ad7266.c
drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/ud.c
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/input/joystick/xpad.c
drivers/input/mouse/elantech.c
drivers/input/mouse/vmmouse.c
drivers/input/rmi4/rmi_bus.c
drivers/input/rmi4/rmi_f12.c
drivers/input/touchscreen/ts4800-ts.c
drivers/input/touchscreen/tsc2004.c
drivers/input/touchscreen/tsc2005.c
drivers/input/touchscreen/tsc200x-core.c
drivers/input/touchscreen/tsc200x-core.h
drivers/input/touchscreen/wacom_w8001.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/intel-iommu.c
drivers/iommu/iova.c
drivers/irqchip/irq-mips-gic.c
drivers/media/i2c/adv7604.c
drivers/media/usb/airspy/airspy.c
drivers/media/v4l2-core/v4l2-ioctl.c
drivers/mfd/max77620.c
drivers/mmc/card/block.c
drivers/mmc/host/pxamci.c
drivers/mtd/nand/omap2.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/can/at91_can.c
drivers/net/can/c_can/c_can.c
drivers/net/can/dev.c
drivers/net/can/usb/Kconfig
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/ethernet/agere/et131x.c
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/atheros/alx/alx.h
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/aurora/nb8800.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/mbx.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/wq.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/ethernet/microchip/enc28j60.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_spq.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/tile/tilegx.c
drivers/net/fddi/skfp/Makefile
drivers/net/geneve.c
drivers/net/macsec.c
drivers/net/phy/dp83867.c
drivers/net/phy/fixed_phy.c
drivers/net/phy/marvell.c
drivers/net/phy/smsc.c
drivers/net/ppp/ppp_generic.c
drivers/net/team/team.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/r8152.c
drivers/net/usb/usbnet.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
drivers/nvdimm/pfn_devs.c
drivers/nvme/host/core.c
drivers/phy/phy-bcm-ns-usb2.c
drivers/phy/phy-miphy28lp.c
drivers/phy/phy-rcar-gen3-usb2.c
drivers/phy/phy-rockchip-dp.c
drivers/phy/phy-stih407-usb.c
drivers/phy/phy-sun4i-usb.c
drivers/pinctrl/Makefile
drivers/pinctrl/freescale/pinctrl-imx.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/tegra/Makefile
drivers/platform/chrome/cros_ec_dev.c
drivers/platform/x86/apple-gmux.c
drivers/power/power_supply_core.c
drivers/power/tps65217_charger.c
drivers/pps/clients/pps_parport.c
drivers/regulator/anatop-regulator.c
drivers/regulator/max77620-regulator.c
drivers/regulator/qcom_smd-regulator.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/ipr.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/scsi_devinfo.c
drivers/spi/spi-rockchip.c
drivers/spi/spi-sun4i.c
drivers/spi/spi-sun6i.c
drivers/spi/spi-ti-qspi.c
drivers/staging/iio/accel/sca3000_core.c
drivers/staging/iio/adc/ad7606_spi.c
drivers/staging/iio/impedance-analyzer/ad5933.c
drivers/tty/pty.c
drivers/tty/vt/keyboard.c
drivers/tty/vt/vt.c
drivers/usb/common/usb-otg-fsm.c
drivers/usb/core/hcd.c
drivers/usb/dwc3/dwc3-st.c
drivers/usb/host/ehci-st.c
drivers/usb/host/ohci-st.c
drivers/xen/xen-acpi-processor.c
drivers/xen/xenbus/xenbus_dev_frontend.c
drivers/xen/xenbus/xenbus_xs.c
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/ceph/export.c
fs/ceph/file.c
fs/cifs/cifs_unicode.c
fs/cifs/cifs_unicode.h
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/ntlmssp.h
fs/cifs/sess.c
fs/cifs/smb2pdu.c
fs/configfs/file.c
fs/dax.c
fs/ecryptfs/crypto.c
fs/ecryptfs/file.c
fs/ecryptfs/kthread.c
fs/ecryptfs/main.c
fs/fs-writeback.c
fs/fuse/dir.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/inode.c
fs/libfs.c
fs/lockd/svc.c
fs/locks.c
fs/namespace.c
fs/nfs/dir.c
fs/nfs/direct.c
fs/nfs/inode.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/pnfs.c
fs/nfs/pnfs_nfs.c
fs/nfs/read.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/overlayfs.h
fs/overlayfs/super.c
fs/xfs/xfs_ioctl.c
include/acpi/acpi_drivers.h
include/acpi/acpixf.h
include/asm-generic/vmlinux.lds.h
include/drm/drmP.h
include/drm/drm_crtc.h
include/drm/drm_dp_helper.h
include/drm/drm_dp_mst_helper.h
include/drm/drm_irq.h [new file with mode: 0644]
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/kvm/arm_pmu.h
include/linux/audit.h
include/linux/bcma/bcma.h
include/linux/bpf.h
include/linux/filter.h
include/linux/huge_mm.h
include/linux/inet_diag.h
include/linux/memcontrol.h
include/linux/mfd/da9052/da9052.h
include/linux/mlx4/device.h
include/linux/mlx5/driver.h
include/linux/net.h
include/linux/netdevice.h
include/linux/posix_acl.h
include/linux/pwm.h
include/linux/qed/qed_eth_if.h
include/linux/radix-tree.h
include/linux/reset.h
include/linux/rmap.h
include/linux/skbuff.h
include/linux/sock_diag.h
include/linux/usb/ehci_def.h
include/net/bonding.h
include/net/gre.h
include/net/ip.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_tables.h
include/net/sock.h
include/net/switchdev.h
include/net/tc_act/tc_ife.h
include/uapi/drm/vgem_drm.h [new file with mode: 0644]
include/uapi/linux/Kbuild
include/uapi/linux/fuse.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/input.h
include/uapi/linux/netfilter/Kbuild
include/uapi/linux/netfilter/xt_SYNPROXY.h
init/Kconfig
kernel/audit.c
kernel/audit.h
kernel/auditsc.c
kernel/bpf/verifier.c
kernel/cgroup.c
kernel/cpu.c
kernel/events/core.c
kernel/gcov/gcc_4_7.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/loadavg.c
kernel/sched/sched.h
kernel/time/posix-cpu-timers.c
kernel/trace/bpf_trace.c
kernel/workqueue.c
mm/compaction.c
mm/huge_memory.c
mm/hugetlb.c
mm/kasan/quarantine.c
mm/memcontrol.c
mm/memory.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/slab_common.c
mm/workingset.c
net/8021q/vlan_dev.c
net/8021q/vlan_netlink.c
net/ax25/af_ax25.c
net/ax25/ax25_ds_timer.c
net/ax25/ax25_std_timer.c
net/ax25/ax25_subr.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/originator.c
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/ceph/osdmap.c
net/core/filter.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/skbuff.c
net/core/sock.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/decnet/dn_fib.c
net/ipv4/esp4.c
net/ipv4/fib_semantics.c
net/ipv4/gre_demux.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ipconfig.c
net/ipv4/ipmr.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/icmp.c
net/ipv6/ip6_checksum.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6mr.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/kcm/kcmproc.c
net/mac80211/mesh.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netfilter/nft_ct.c
net/netfilter/nft_hash.c
net/netfilter/nft_meta.c
net/netfilter/nft_rbtree.c
net/openvswitch/conntrack.c
net/packet/af_packet.c
net/rds/ib_cm.c
net/rds/loop.c
net/rds/sysctl.c
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/tcp_recv.c
net/rds/tcp_send.c
net/rds/transport.c
net/rose/rose_in.c
net/sched/act_api.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/sch_fifo.c
net/sched/sch_htb.c
net/sched/sch_netem.c
net/sched/sch_prio.c
net/sctp/input.c
net/sctp/sctp_diag.c
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/link.c
net/tipc/msg.c
net/tipc/msg.h
net/tipc/netlink_compat.c
net/tipc/node.c
net/tipc/socket.c
net/vmw_vsock/af_vsock.c
net/wireless/nl80211.c
net/wireless/util.c
scripts/gdb/linux/.gitignore
scripts/gdb/linux/Makefile
scripts/gdb/linux/constants.py.in
scripts/gdb/linux/radixtree.py [deleted file]
scripts/gdb/linux/symbols.py
scripts/gdb/vmlinux-gdb.py
security/apparmor/lsm.c
sound/core/control.c
sound/core/pcm.c
sound/core/timer.c
sound/pci/au88x0/au88x0_core.c
sound/pci/echoaudio/echoaudio.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/Kconfig
sound/soc/codecs/ak4613.c
sound/soc/codecs/cx20442.c
sound/soc/codecs/hdac_hdmi.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5670.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm8940.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/davinci/davinci-mcasp.h
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/atom/sst-mfld-platform-compress.c
sound/soc/intel/skylake/bxt-sst.c
sound/soc/sh/rcar/adg.c
sound/usb/card.c
tools/objtool/builtin-check.c
tools/testing/radix-tree/tag_check.c
tools/vm/slabinfo.c

index 4ab7d43d07544f7a22b35e49f9eb691eb3e9204d..7050ce8794b9a4b3dd93b76dd9e2a6d708b468ee 100644 (file)
--- a/Documentation/gdb-kernel-debugging.txt
+++ b/Documentation/gdb-kernel-debugging.txt
@@ -139,27 +139,6 @@ Examples of using the Linux-provided gdb helpers
       start_comm = "swapper/2\000\000\000\000\000\000"
     }
 
- o Dig into a radix tree data structure, such as the IRQ descriptors:
-    (gdb) print (struct irq_desc)$lx_radix_tree_lookup(irq_desc_tree, 18)
-    $6 = {
-      irq_common_data = {
-        state_use_accessors = 67584,
-        handler_data = 0x0 <__vectors_start>,
-        msi_desc = 0x0 <__vectors_start>,
-        affinity = {{
-            bits = {65535}
-          }}
-      },
-      irq_data = {
-        mask = 0,
-        irq = 18,
-        hwirq = 27,
-        common = 0xee803d80,
-        chip = 0xc0eb0854 <gic_data>,
-        domain = 0xee808000,
-        parent_data = 0x0 <__vectors_start>,
-        chip_data = 0xc0eb0854 <gic_data>
-      } <... trimmed ...>
 
 List of commands and functions
 ------------------------------
index 490d655cda20503a681684c53789eb2944e82fe1..3bb26135971f06a3f9d3794a1d23efbaafec28fe 100644 (file)
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -188,7 +188,8 @@ Manual IRQ Registration
 Drivers that require multiple interrupt handlers can't use the managed
 IRQ registration functions. In that case IRQs must be registered and
 unregistered manually (usually with the :c:func:`request_irq()` and
-:c:func:`free_irq()` functions, or their devm_\* equivalent).
+:c:func:`free_irq()` functions, or their :c:func:`devm_request_irq()` and
+:c:func:`devm_free_irq()` equivalents).
 
 When manually registering IRQs, drivers must not set the
 DRIVER_HAVE_IRQ driver feature flag, and must not provide the
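
A minimal sketch of the manual registration pattern the hunk above documents,
using the devm_request_irq()/devm_free_irq() helpers it names; the platform
device, the "foo-drm" name, and foo_irq_handler() are illustrative
assumptions, not taken from this patch:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>
    #include <drm/drmP.h>

    static irqreturn_t foo_irq_handler(int irq, void *arg)
    {
            struct drm_device *ddev = arg;

            /* acknowledge and dispatch this device's interrupt sources */
            (void)ddev;
            return IRQ_HANDLED;
    }

    static int foo_register_irq(struct platform_device *pdev,
                                struct drm_device *ddev, unsigned int irq)
    {
            /* Managed registration: released automatically on unbind,
             * or early via devm_free_irq(&pdev->dev, irq, ddev). */
            return devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
                                    "foo-drm", ddev);
    }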
@@ -242,11 +243,13 @@ Open/Close, File Operations and IOCTLs
 Open and Close
 --------------
 
-int (\*firstopen) (struct drm_device \*); void (\*lastclose) (struct
-drm_device \*); int (\*open) (struct drm_device \*, struct drm_file
-\*); void (\*preclose) (struct drm_device \*, struct drm_file \*);
-void (\*postclose) (struct drm_device \*, struct drm_file \*);
-    Open and close handlers. None of those methods are mandatory.
+Open and close handlers. None of those methods are mandatory::
+
+    int (*firstopen) (struct drm_device *);
+    void (*lastclose) (struct drm_device *);
+    int (*open) (struct drm_device *, struct drm_file *);
+    void (*preclose) (struct drm_device *, struct drm_file *);
+    void (*postclose) (struct drm_device *, struct drm_file *);
 
 The firstopen method is called by the DRM core for legacy UMS (User Mode
 Setting) drivers only when an application opens a device that has no
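
As a companion to the prototypes above, a minimal sketch of hooking open and
postclose into a struct drm_driver; the foo_* names and the per-file private
struct are illustrative assumptions, not from this patch:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <drm/drmP.h>

    struct foo_file_priv {
            int placeholder;        /* per-open-file driver state */
    };

    static int foo_open(struct drm_device *dev, struct drm_file *file)
    {
            file->driver_priv = kzalloc(sizeof(struct foo_file_priv),
                                        GFP_KERNEL);
            return file->driver_priv ? 0 : -ENOMEM;
    }

    static void foo_postclose(struct drm_device *dev, struct drm_file *file)
    {
            kfree(file->driver_priv);
    }

    static struct drm_driver foo_driver = {
            .open      = foo_open,
            .postclose = foo_postclose,
            /* .fops, .ioctls, feature flags, ... */
    };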
index 0e1c80436c1d1a5e95345cf0753e46c6ab974d36..8dfa4b214b96f39449f17fac9bf085a9f26ddb84 100644 (file)
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -67,9 +67,6 @@ drivers can manually clean up a framebuffer at module unload time with
 DRM Format Handling
 -------------------
 
-.. kernel-doc:: include/drm/drm_fourcc.h
-   :internal:
-
 .. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
    :export:
 
@@ -652,5 +649,5 @@ Vertical Blanking and Interrupt Handling Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_irq.c
    :export:
 
-.. kernel-doc:: include/drm/drmP.h
-   :functions: drm_crtc_vblank_waitqueue
+.. kernel-doc:: include/drm/drm_irq.h
+   :internal:
index 1a5a12184a358dc395874447ccc3c502d67f567b..85d0549ad84636652d1867d7d0a58c7ed1cc3c19 100644 (file)
--- a/Documentation/x86/intel_mpx.txt
+++ b/Documentation/x86/intel_mpx.txt
@@ -45,7 +45,7 @@ is how we expect the compiler, application and kernel to work together.
    MPX-instrumented.
 3) The kernel detects that the CPU has MPX, allows the new prctl() to
    succeed, and notes the location of the bounds directory. Userspace is
-   expected to keep the bounds directory at that locationWe note it
+   expected to keep the bounds directory at that location. We note it
    instead of reading it each time because the 'xsave' operation needed
    to access the bounds directory register is an expensive operation.
 4) If the application needs to spill bounds out of the 4 registers, it
@@ -167,7 +167,7 @@ If a #BR is generated due to a bounds violation caused by MPX.
 We need to decode MPX instructions to get violation address and
 set this address into extended struct siginfo.
 
-The _sigfault feild of struct siginfo is extended as follow:
+The _sigfault field of struct siginfo is extended as follow:
 
 87             /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
 88             struct {
@@ -240,5 +240,5 @@ them at the same bounds table.
 This is allowed architecturally.  See more information "Intel(R) Architecture
 Instruction Set Extensions Programming Reference" (9.3.4).
 
-However, if users did this, the kernel might be fooled in to unmaping an
+However, if users did this, the kernel might be fooled in to unmapping an
 in-use bounds table since it does not recognize sharing.
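
Since the flow above revolves around "the new prctl()", here is a hedged
userspace sketch of the enable call from step 3; PR_MPX_ENABLE_MANAGEMENT is
the 4.7-era uapi constant, guarded in case older libc headers lack it:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_MPX_ENABLE_MANAGEMENT
    #define PR_MPX_ENABLE_MANAGEMENT 43     /* uapi/linux/prctl.h value */
    #endif

    int main(void)
    {
            /* Ask the kernel to manage bounds tables for this process;
             * fails on CPUs or kernels without MPX support. */
            if (prctl(PR_MPX_ENABLE_MANAGEMENT, 0, 0, 0, 0)) {
                    perror("prctl(PR_MPX_ENABLE_MANAGEMENT)");
                    return 1;
            }
            return 0;
    }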
index 39d1723267036a5ba7dff02dc82e39d69a1eeae8..6a0607b99ed8780c45920676e9b122efefa77235 100644 (file)
--- a/Documentation/x86/tlb.txt
+++ b/Documentation/x86/tlb.txt
@@ -5,7 +5,7 @@ memory, it has two choices:
     from areas other than the one we are trying to flush will be
     destroyed and must be refilled later, at some cost.
  2. Use the invlpg instruction to invalidate a single page at a
-    time.  This could potentialy cost many more instructions, but
+    time.  This could potentially cost many more instructions, but
     it is a much more precise operation, causing no collateral
     damage to other TLB entries.
 
@@ -19,7 +19,7 @@ Which method to do depends on a few things:
     work.
  3. The size of the TLB.  The larger the TLB, the more collateral
     damage we do with a full flush.  So, the larger the TLB, the
-    more attrative an individual flush looks.  Data and
+    more attractive an individual flush looks.  Data and
     instructions have separate TLBs, as do different page sizes.
  4. The microarchitecture.  The TLB has become a multi-level
     cache on modern CPUs, and the global flushes have become more
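
For reference, choice 2 above is what the kernel's single-page flush helper
does on x86; a minimal sketch of the invlpg usage (kernel context, shown only
to make the trade-off concrete):

    /* Invalidate the TLB entry covering one page; other entries,
     * including those from unrelated areas, survive intact. */
    static inline void flush_one_page(unsigned long addr)
    {
            asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
    }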
index b1fb30273286c7ae6878f6b43fecb6df8f8f1603..d0648a74fceb50659d2eb018d47178619636b74e 100644 (file)
--- a/Documentation/x86/x86_64/machinecheck
+++ b/Documentation/x86/x86_64/machinecheck
@@ -36,7 +36,7 @@ between all CPUs.
 
 check_interval
        How often to poll for corrected machine check errors, in seconds
-       (Note output is hexademical). Default 5 minutes.  When the poller
+       (Note output is hexadecimal). Default 5 minutes.  When the poller
        finds MCEs it triggers an exponential speedup (poll more often) on
        the polling interval.  When the poller stops finding MCEs, it
        triggers an exponential backoff (poll less often) on the polling
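
A small userspace sketch of tuning this knob; the machinecheck0 path follows
the machinecheckN sysfs naming this document uses, and 60 seconds is an
arbitrary example value:

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/devices/system/machinecheck"
                               "/machinecheck0/check_interval";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return 1;
            }
            fprintf(f, "%d\n", 60);         /* poll every 60 seconds */
            return fclose(f) ? 1 : 0;
    }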
index 5351c4be3aa2b3d9c3dbeced4e96794cc8bdd534..ea8c7ab56cf5e790df702144200b18b69ae81bd7 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -595,6 +595,10 @@ S: Odd Fixes
 L:     linux-alpha@vger.kernel.org
 F:     arch/alpha/
 
+ALPS PS/2 TOUCHPAD DRIVER
+R:     Pali Rohár <pali.rohar@gmail.com>
+F:     drivers/input/mouse/alps.*
+
 ALTERA MAILBOX DRIVER
 M:     Ley Foon Tan <lftan@altera.com>
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
@@ -1698,8 +1702,6 @@ S:        Maintained
 F:     drivers/edac/altera_edac.
 
 ARM/STI ARCHITECTURE
-M:     Srinivas Kandagatla <srinivas.kandagatla@gmail.com>
-M:     Maxime Coquelin <maxime.coquelin@st.com>
 M:     Patrice Chotard <patrice.chotard@st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kernel@stlinux.com
@@ -1732,6 +1734,7 @@ F:        drivers/ata/ahci_st.c
 
 ARM/STM32 ARCHITECTURE
 M:     Maxime Coquelin <mcoquelin.stm32@gmail.com>
+M:     Alexandre Torgue <alexandre.torgue@st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git
@@ -4511,7 +4514,7 @@ S:        Orphan
 F:     fs/efs/
 
 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M:     Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
+M:     Douglas Miller <dougmill@linux.vnet.ibm.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/ibm/ehea/
@@ -7458,7 +7461,7 @@ F:        drivers/scsi/megaraid.*
 F:     drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
-M:     Eugenia Emantayev <eugenia@mellanox.com>
+M:     Tariq Toukan <tariqt@mellanox.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -7510,6 +7513,7 @@ Q:        http://patchwork.ozlabs.org/project/linux-mtd/list/
 T:     git git://git.infradead.org/linux-mtd.git
 T:     git git://git.infradead.org/l2-mtd.git
 S:     Maintained
+F:     Documentation/devicetree/bindings/mtd/
 F:     drivers/mtd/
 F:     include/linux/mtd/
 F:     include/uapi/mtd/
@@ -8997,6 +9001,7 @@ L:        linux-gpio@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S:     Maintained
 F:     Documentation/devicetree/bindings/pinctrl/
+F:     Documentation/pinctrl.txt
 F:     drivers/pinctrl/
 F:     include/linux/pinctrl/
 
index 4acefc10c13aa587bf640d0140af2d3ee98f07e7..d30c59f9ce92afc4716db730431a6d7d317bc37b 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -363,11 +363,13 @@ CHECK             = sparse
 
 CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
                  -Wbitwise -Wno-return-void $(CF)
+NOSTDINC_FLAGS  =
 CFLAGS_MODULE   =
 AFLAGS_MODULE   =
 LDFLAGS_MODULE  =
 CFLAGS_KERNEL  =
 AFLAGS_KERNEL  =
+LDFLAGS_vmlinux =
 CFLAGS_GCOV    = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
 CFLAGS_KCOV    = -fsanitize-coverage=trace-pc
 
index d4df6be66d583d8708585013c07020fbde28b658..85814e74677d54209908d41025fc329d159d2902 100644 (file)
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -66,8 +66,6 @@ endif
 
 endif
 
-cflags-$(CONFIG_ARC_DW2_UNWIND)                += -fasynchronous-unwind-tables
-
 # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
 ifeq ($(atleast_gcc48),y)
 cflags-$(CONFIG_ARC_DW2_UNWIND)                += -gdwarf-2
index e0efff15a5aece29f9bf8ca45b95f856fc206eab..b9192a653b7e3a2a2a34c8ddaed1dfad037f30c0 100644 (file)
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -142,7 +142,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
         * prelogue is setup (callee regs saved and then fp set and not other
         * way around
         */
-       pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+       pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
        return 0;
 
 #endif
index 8450944b28e6bb7728ccd731dcb709fa7346aa28..22f7a13e20b40e094611a10953180fd1679636cd 100644 (file)
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -58,8 +58,8 @@
        soc {
                ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
-                         MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000
-                         MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>;
+                         MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
+                         MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
 
                internal-regs {
 
index a03e56fb5dbc7882abb90fc71298bac1c3e8a56b..ca58eb279d5541631bcc3dea5e56ef87797071ea 100644 (file)
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -65,8 +65,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-hdmi";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
-                                <&ahb_gates 44>, <&dram_gates 26>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 43>, <&ahb_gates 44>,
+                                <&dram_gates 26>;
                        status = "disabled";
                };
 
@@ -74,8 +75,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
-                                <&ahb_gates 44>, <&ahb_gates 46>,
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 43>, <&ahb_gates 44>,
+                                <&ahb_gates 46>,
                                 <&dram_gates 25>, <&dram_gates 26>;
                        status = "disabled";
                };
@@ -84,9 +86,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_fe0-de_be0-lcd0";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>,
-                                <&ahb_gates 46>, <&dram_gates 25>,
-                                <&dram_gates 26>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 44>, <&ahb_gates 46>,
+                                <&dram_gates 25>, <&dram_gates 26>;
                        status = "disabled";
                };
 
@@ -94,8 +96,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
-                       clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
-                                <&ahb_gates 44>, <&ahb_gates 46>,
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>,
+                                <&ahb_gates 36>, <&ahb_gates 44>,
+                                <&ahb_gates 46>,
                                 <&dram_gates 5>, <&dram_gates 25>, <&dram_gates 26>;
                        status = "disabled";
                };
index bddd0de88af6be1d3e68b027b644a56e5e0ee61b..367f3301249364e7ad47e4c2e594ebfe6ecd88fc 100644 (file)
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -65,8 +65,8 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-hdmi";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
-                                <&ahb_gates 44>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 43>, <&ahb_gates 44>;
                        status = "disabled";
                };
 
@@ -74,7 +74,8 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 44>;
                        status = "disabled";
                };
 
@@ -82,8 +83,8 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-tve0";
-                       clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
-                                <&ahb_gates 44>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>,
+                                <&ahb_gates 36>, <&ahb_gates 44>;
                        status = "disabled";
                };
        };
index a8d8b4582397a2dee85929aa43d955efce5009f2..f694482bdeb64ccf1f21ffaa69d3fb3f1027ea0b 100644 (file)
--- a/arch/arm/boot/dts/sun5i-r8-chip.dts
+++ b/arch/arm/boot/dts/sun5i-r8-chip.dts
@@ -52,7 +52,7 @@
 
 / {
        model = "NextThing C.H.I.P.";
-       compatible = "nextthing,chip", "allwinner,sun5i-r8";
+       compatible = "nextthing,chip", "allwinner,sun5i-r8", "allwinner,sun5i-a13";
 
        aliases {
                i2c0 = &i2c0;
index febdf4c72fb013d3a2c222a20d4f30cf149d2513..2c34bbbb95700a4c1af75ffb44d92a312b927dea 100644 (file)
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -67,8 +67,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-hdmi";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
-                                <&ahb_gates 44>, <&dram_gates 26>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 43>, <&ahb_gates 44>,
+                                <&dram_gates 26>;
                        status = "disabled";
                };
 
@@ -76,8 +77,8 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>,
-                                <&dram_gates 26>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 44>, <&dram_gates 26>;
                        status = "disabled";
                };
 
@@ -85,7 +86,7 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-tve0";
-                       clocks = <&pll5 1>,
+                       clocks = <&pll3>, <&pll5 1>,
                                 <&ahb_gates 34>, <&ahb_gates 36>, <&ahb_gates 44>,
                                 <&dram_gates 5>, <&dram_gates 26>;
                        status = "disabled";
                pll3x2: pll3x2_clk {
                        #clock-cells = <0>;
                        compatible = "fixed-factor-clock";
+                       clocks = <&pll3>;
                        clock-div = <1>;
                        clock-mult = <2>;
                        clock-output-names = "pll3-2x";
                pll7x2: pll7x2_clk {
                        #clock-cells = <0>;
                        compatible = "fixed-factor-clock";
+                       clocks = <&pll7>;
                        clock-div = <1>;
                        clock-mult = <2>;
                        clock-output-names = "pll7-2x";
index 1eca3b28ac6480e8b1d628cfebfb2c58ebd24280..b6da15d823a61bcbc8d77e6c7d295306795cef09 100644 (file)
--- a/arch/arm/boot/dts/tegra30-beaver.dts
+++ b/arch/arm/boot/dts/tegra30-beaver.dts
 
                                ldo5_reg: ldo5 {
                                        regulator-name = "vddio_sdmmc,avdd_vdac";
-                                       regulator-min-microvolt = <3300000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <3300000>;
                                        regulator-always-on;
                                };
 
        sdhci@78000000 {
                status = "okay";
+               vqmmc-supply = <&ldo5_reg>;
                cd-gpios = <&gpio TEGRA_GPIO(I, 5) GPIO_ACTIVE_LOW>;
                wp-gpios = <&gpio TEGRA_GPIO(T, 3) GPIO_ACTIVE_HIGH>;
                power-gpios = <&gpio TEGRA_GPIO(D, 7) GPIO_ACTIVE_HIGH>;
index 893941ec98dc6da787629068359275a863516056..f1bde7c4e736de72253b8e8ae1419688d910b098 100644 (file)
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -263,6 +263,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
        kvm_timer_vcpu_terminate(vcpu);
        kvm_vgic_vcpu_destroy(vcpu);
        kvm_pmu_vcpu_destroy(vcpu);
+       kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
index ecf9e0c3b107808752fe49af062b8758e1784986..e53c6cfcab51cd12c798fd11d663686c77761b02 100644 (file)
--- a/arch/arm/mach-mvebu/Makefile
+++ b/arch/arm/mach-mvebu/Makefile
@@ -7,9 +7,15 @@ CFLAGS_pmsu.o                  := -march=armv7-a
 obj-$(CONFIG_MACH_MVEBU_ANY)    += system-controller.o mvebu-soc-id.o
 
 ifeq ($(CONFIG_MACH_MVEBU_V7),y)
-obj-y                           += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o pm.o pm-board.o
+obj-y                           += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o
+
+obj-$(CONFIG_PM)                += pm.o pm-board.o
 obj-$(CONFIG_SMP)               += platsmp.o headsmp.o platsmp-a9.o headsmp-a9.o
 endif
 
 obj-$(CONFIG_MACH_DOVE)                 += dove.o
-obj-$(CONFIG_MACH_KIRKWOOD)     += kirkwood.o kirkwood-pm.o
+
+ifeq ($(CONFIG_MACH_KIRKWOOD),y)
+obj-y                           += kirkwood.o
+obj-$(CONFIG_PM)                += kirkwood-pm.o
+endif
index 7e989d61159c384df1de62b27156ddf22d8fa44f..e80f0dde218919dab8d7a2b5873a4962755f3ee2 100644 (file)
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -162,22 +162,16 @@ exit:
 }
 
 /*
- * This ioremap hook is used on Armada 375/38x to ensure that PCIe
- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
- * is needed as a workaround for a deadlock issue between the PCIe
- * interface and the cache controller.
+ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
+ * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
+ * needed for the HW I/O coherency mechanism to work properly without
+ * deadlock.
  */
 static void __iomem *
-armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
-                             unsigned int mtype, void *caller)
+armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+                        unsigned int mtype, void *caller)
 {
-       struct resource pcie_mem;
-
-       mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
-
-       if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
-               mtype = MT_UNCACHED;
-
+       mtype = MT_UNCACHED;
        return __arm_ioremap_caller(phys_addr, size, mtype, caller);
 }
 
@@ -186,7 +180,8 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
        struct device_node *cache_dn;
 
        coherency_cpu_base = of_iomap(np, 0);
-       arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
+       arch_ioremap_caller = armada_wa_ioremap_caller;
+       pci_ioremap_set_mem_type(MT_UNCACHED);
 
        /*
         * We should switch the PL310 to I/O coherency mode only if
index 87e1985f3be8c5912d1b1ca4fbc5d8d70024db50..9d9fd4b9a72e574baba29be7381e8740f06b2d66 100644 (file)
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
 #define APM_CPU_PART_POTENZA           0x000
 
 #define CAVIUM_CPU_PART_THUNDERX       0x0A1
+#define CAVIUM_CPU_PART_THUNDERX_81XX  0x0A2
 
 #define BRCM_CPU_PART_VULCAN           0x516
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_THUNDERX  MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 
 #ifndef __ASSEMBLY__
 
index a307eb6e7fa8d75e536fe3cff56c5d4f978fe212..7f94755089e200afbd4c015316da4517fe113bcd 100644 (file)
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -117,6 +117,8 @@ struct pt_regs {
        };
        u64 orig_x0;
        u64 syscallno;
+       u64 orig_addr_limit;
+       u64 unused;     // maintain 16 byte alignment
 };
 
 #define arch_has_single_step() (1)
index f8e5d47f08807aa41d33c84a323e4bf1f37ffebf..2f4ba774488ad5cc72444a8d351ce63fa504351c 100644 (file)
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -60,6 +60,7 @@ int main(void)
   DEFINE(S_PC,                 offsetof(struct pt_regs, pc));
   DEFINE(S_ORIG_X0,            offsetof(struct pt_regs, orig_x0));
   DEFINE(S_SYSCALLNO,          offsetof(struct pt_regs, syscallno));
+  DEFINE(S_ORIG_ADDR_LIMIT,    offsetof(struct pt_regs, orig_addr_limit));
   DEFINE(S_FRAME_SIZE,         sizeof(struct pt_regs));
   BLANK();
   DEFINE(MM_CONTEXT_ID,                offsetof(struct mm_struct, context.id.counter));
index d42789499f17eb322a47da83046ca0119e643c75..af716b65110d901d1a1fea40da801240475c9ccf 100644 (file)
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -98,6 +98,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                MIDR_RANGE(MIDR_THUNDERX, 0x00,
                           (1 << MIDR_VARIANT_SHIFT) | 1),
        },
+       {
+       /* Cavium ThunderX, T81 pass 1.0 */
+               .desc = "Cavium erratum 27456",
+               .capability = ARM64_WORKAROUND_CAVIUM_27456,
+               MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
+       },
 #endif
        {
        }
index 12e8d2bcb3f9da4e643649442685138dea7966d1..6c3b7345a6c428f5952909ac0833684ca0401f1a 100644 (file)
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -28,6 +28,7 @@
 #include <asm/errno.h>
 #include <asm/esr.h>
 #include <asm/irq.h>
+#include <asm/memory.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 
        mov     x29, xzr                        // fp pointed to user-space
        .else
        add     x21, sp, #S_FRAME_SIZE
-       .endif
+       get_thread_info tsk
+       /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+       ldr     x20, [tsk, #TI_ADDR_LIMIT]
+       str     x20, [sp, #S_ORIG_ADDR_LIMIT]
+       mov     x20, #TASK_SIZE_64
+       str     x20, [tsk, #TI_ADDR_LIMIT]
+       ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
+       .endif /* \el == 0 */
        mrs     x22, elr_el1
        mrs     x23, spsr_el1
        stp     lr, x21, [sp, #S_LR]
        .endm
 
        .macro  kernel_exit, el
+       .if     \el != 0
+       /* Restore the task's original addr_limit. */
+       ldr     x20, [sp, #S_ORIG_ADDR_LIMIT]
+       str     x20, [tsk, #TI_ADDR_LIMIT]
+
+       /* No need to restore UAO, it will be restored from SPSR_EL1 */
+       .endif
+
        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        .if     \el == 0
        ct_user_enter
@@ -406,7 +422,6 @@ el1_irq:
        bl      trace_hardirqs_off
 #endif
 
-       get_thread_info tsk
        irq_handler
 
 #ifdef CONFIG_PREEMPT
index 013e2cbe792474eb3777c8f01ff65f2356527260..b1166d1e5955cd80096ea7412a2371008ec81b7c 100644 (file)
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -280,7 +280,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
        }
 
        if (permission_fault(esr) && (addr < USER_DS)) {
-               if (get_fs() == KERNEL_DS)
+               /* regs->orig_addr_limit may be 0 if we entered from EL0 */
+               if (regs->orig_addr_limit == KERNEL_DS)
                        die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
                if (!search_exception_tables(regs->pc))
index 01d877c6868f05e5a8fb72639a980cd4b7ef66dc..cf3023dced49d7fbe55ab50a8f357b95e5f5194a 100644 (file)
--- a/arch/m32r/boot/compressed/m32r_sio.c
+++ b/arch/m32r/boot/compressed/m32r_sio.c
@@ -8,12 +8,13 @@
 
 #include <asm/processor.h>
 
-static void putc(char c);
+static void m32r_putc(char c);
 
 static int puts(const char *s)
 {
        char c;
-       while ((c = *s++)) putc(c);
+       while ((c = *s++))
+               m32r_putc(c);
        return 0;
 }
 
@@ -41,7 +42,7 @@ static int puts(const char *s)
 #define BOOT_SIO0TXB   PLD_ESIO0TXB
 #endif
 
-static void putc(char c)
+static void m32r_putc(char c)
 {
        while ((*BOOT_SIO0STS & 0x3) != 0x3)
                cpu_relax();
@@ -61,7 +62,7 @@ static void putc(char c)
 #define SIO0TXB        (volatile unsigned short *)(0x00efd000 + 30)
 #endif
 
-static void putc(char c)
+static void m32r_putc(char c)
 {
        while ((*SIO0STS & 0x1) == 0)
                cpu_relax();
index fa31be297b85acf1ba31ca4059f2f48825c91d87..73d92ea0ce65b5caca8e79f208995c034872b004 100644 (file)
--- a/arch/m68k/coldfire/head.S
+++ b/arch/m68k/coldfire/head.S
@@ -288,7 +288,7 @@ _clear_bss:
 #endif
 
        /*
-        *      Assember start up done, start code proper.
+        *      Assembler start up done, start code proper.
         */
        jsr     start_kernel                    /* start Linux kernel */
 
index c525e4c08f8477f45bb0e978b0c436af07471cbc..9abb1a441da082e2fcf8724a0171adec50e4e7b1 100644 (file)
--- a/arch/m68k/coldfire/m5272.c
+++ b/arch/m68k/coldfire/m5272.c
@@ -111,7 +111,7 @@ void __init config_BSP(char *commandp, int size)
 /***************************************************************************/
 
 /*
- * Some 5272 based boards have the FEC ethernet diectly connected to
+ * Some 5272 based boards have the FEC ethernet directly connected to
  * an ethernet switch. In this case we need to use the fixed phy type,
  * and we need to declare it early in boot.
  */
index 821de928dc3f9d14adbb16c1ad70accc9e57f477..6a640be485684f00497a63ee9630bd77df9dbe4a 100644 (file)
--- a/arch/m68k/coldfire/pci.c
+++ b/arch/m68k/coldfire/pci.c
@@ -42,7 +42,7 @@ static unsigned long iospace;
 
 /*
  * We need to be carefull probing on bus 0 (directly connected to host
- * bridge). We should only acccess the well defined possible devices in
+ * bridge). We should only access the well defined possible devices in
  * use, ignore aliases and the like.
  */
 static unsigned char mcf_host_slot2sid[32] = {
index 3ee6976f60885e5b2e0e4e2a80df6d824de14694..8f5b6f7dd1366024b973a6eb7846bdd0dd47bd02 100644 (file)
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -359,6 +360,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -553,7 +555,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index e96787ffcbced33f9893d2760259177086b13c1f..31bded9c83d45f3a4b6861e2a5a11379324b4209 100644 (file)
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -341,6 +342,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -512,7 +514,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 083fe6beac149da81250ca4fccd50798f9e4ef0d..0d7739e04ae2f13d63719db1a45a3a712b668e36 100644 (file)
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -350,6 +351,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -533,7 +535,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 475130c06dcba04b646ec2e7c199aee7045dea7d..2cbb5c465fec03487046c08b8aef427a6cc81518 100644 (file)
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -340,6 +341,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -504,7 +506,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 4339658c200f3eb8af4bc2645836e109466055cb..96102a42c156ee2d94fa5e069ba8e8c7f879387d 100644 (file)
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -341,6 +342,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -514,7 +516,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 831cc8c3a2e259f67d318257e72bcab84e36b8c1..97d88f7dc5a7bd60db5f05db2483bec15de4723b 100644 (file)
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -357,6 +358,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -536,7 +538,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 6377afeb522bbb9a235d1bd57f36cc14ca36ac5b..be25ef208f0f6237f055dea3b6cef375e59ea0a9 100644 (file)
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -390,6 +391,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -616,7 +618,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 4304b3d56262bc677383ba4389b65d5e9a7625cd..a008344360c9161183afc063795d16bef1a945ff 100644 (file)
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -339,6 +340,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -504,7 +506,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 074bda4094ffd5b0913d4411371e2597774faab2..6735a25f36d446f389a0620faede9d3f37c42c8e 100644 (file)
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -340,6 +341,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -504,7 +506,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 07b9fa8d7f2ea71dd09a26c8d9abe4a519baa07c..780c6e9f6cf9113df4c75ab3a2a8bd3220f741a4 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -346,6 +347,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -527,7 +529,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 36e6fae02d458e2dcb1a96c0caee68a5301f0341..44693cf361e5df9c2600f26d36e033e00e7bf2b2 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -337,6 +338,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -506,7 +508,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 903acf929511e5066403bfdd1c4d78bbc4084c35..ef0071d6115834657ee43e674dbb1b0fd8422482 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -337,6 +338,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -506,7 +508,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 78cb60f5bb4dda18b28276c4b8b79c787e948590..9bbffebe3eb504833ed0937670bd4168751d61a4 100644 (file)
@@ -10191,7 +10191,7 @@ xdnrm_con:
 xdnrm_sd:
        mov.l           %a1,-(%sp)
        tst.b           LOCAL_EX(%a0)           # is denorm pos or neg?
-       smi.b           %d1                     # set d0 accodingly
+       smi.b           %d1                     # set d0 accordingly
        bsr.l           unf_sub
        mov.l           (%sp)+,%a1
 xdnrm_exit:
@@ -10990,7 +10990,7 @@ src_qnan_m:
 # routines where an instruction is selected by an index into
 # a large jump table corresponding to a given instruction which
 # has been decoded. Flow continues here where we now decode
-# further accoding to the source operand type.
+# further according to the source operand type.
 #
 
        global          fsinh
@@ -23196,14 +23196,14 @@ m_sign:
 #
 #  1. Branch on the sign of the adjusted exponent.
 #  2p.(positive exp)
-#   2. Check M16 and the digits in lwords 2 and 3 in decending order.
+#   2. Check M16 and the digits in lwords 2 and 3 in descending order.
 #   3. Add one for each zero encountered until a non-zero digit.
 #   4. Subtract the count from the exp.
 #   5. Check if the exp has crossed zero in #3 above; make the exp abs
 #         and set SE.
 #      6. Multiply the mantissa by 10**count.
 #  2n.(negative exp)
-#   2. Check the digits in lwords 3 and 2 in decending order.
+#   2. Check the digits in lwords 3 and 2 in descending order.
 #   3. Add one for each zero encountered until a non-zero digit.
 #   4. Add the count to the exp.
 #   5. Check if the exp has crossed zero in #3 above; clear SE.
index 4aedef973cf6fbd02cc67ea65bcbcede0da21c68..3535e6c87eec611bbe03c965c0fc5aeeaeff5343 100644 (file)
@@ -13156,14 +13156,14 @@ m_sign:
 #
 #  1. Branch on the sign of the adjusted exponent.
 #  2p.(positive exp)
-#   2. Check M16 and the digits in lwords 2 and 3 in decending order.
+#   2. Check M16 and the digits in lwords 2 and 3 in descending order.
 #   3. Add one for each zero encountered until a non-zero digit.
 #   4. Subtract the count from the exp.
 #   5. Check if the exp has crossed zero in #3 above; make the exp abs
 #         and set SE.
 #      6. Multiply the mantissa by 10**count.
 #  2n.(negative exp)
-#   2. Check the digits in lwords 3 and 2 in decending order.
+#   2. Check the digits in lwords 3 and 2 in descending order.
 #   3. Add one for each zero encountered until a non-zero digit.
 #   4. Add the count to the exp.
 #   5. Check if the exp has crossed zero in #3 above; clear SE.
index 429fe26e320c9813afdf88011195552498a90032..208b4daa14b334f4ce4365e8eb88ca91ab30b818 100644 (file)
@@ -18,7 +18,7 @@
  * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
  *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  *
- * AUG/25/2000 : addad support for 8, 16 and 32-bit Single-Address-Mode (K)2000
+ * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
  *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  *
  * APR/18/2002 : added proper support for MCF5272 DMA controller.
index f186459072e9b0042fbf413d9224367350adcfa6..699f20c8a0fee4fbe6e093ccf0cee8cf042f3cfb 100644 (file)
 /*
  *     I2C module.
  */
-#define MCFI2C_BASE0           (MCF_MBAR + 0x280)      /* Base addreess I2C0 */
+#define MCFI2C_BASE0           (MCF_MBAR + 0x280)      /* Base address I2C0 */
 #define MCFI2C_SIZE0           0x20                    /* Register set size */
 
-#define MCFI2C_BASE1           (MCF_MBAR2 + 0x440)     /* Base addreess I2C1 */
+#define MCFI2C_BASE1           (MCF_MBAR2 + 0x440)     /* Base address I2C1 */
 #define MCFI2C_SIZE1           0x20                    /* Register set size */
 
 /*
index 26cc3d5a63f82f2de7021a8a3da28e554f1c815e..8824236e303fe815fe99a24008ba4a7b125fb1a5 100644 (file)
@@ -38,7 +38,7 @@
 /*
  *     MMU Operation register.
  */
-#define        MMUOR_UAA       0x00000001              /* Update allocatiom address */
+#define        MMUOR_UAA       0x00000001              /* Update allocation address */
 #define        MMUOR_ACC       0x00000002              /* TLB access */
 #define        MMUOR_RD        0x00000004              /* TLB access read */
 #define        MMUOR_WR        0x00000000              /* TLB access write */
index fc5b36278d040b0620778cbf1f67499f8db5eb6f..c48d21b68f0485fbce8dc5a85bf553b51ac05642 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Q40 master Chip Control
- * RTC stuff merged for compactnes..
+ * RTC stuff merged for compactness.
 */
 
 #ifndef _Q40_MASTER_H
index 4d2adfb32a2ab4f61951357114a8d053bf74de82..7990b6f50105b19b48417843343778b00c7dc03e 100644 (file)
@@ -60,7 +60,7 @@
  *
  * The host talks to the IOPs using a rather simple message-passing scheme via
  * a shared memory area in the IOP RAM. Each IOP has seven "channels"; each
- * channel is conneced to a specific software driver on the IOP. For example
+ * channel is connected to a specific software driver on the IOP. For example
  * on the SCC IOP there is one channel for each serial port. Each channel has
  * an incoming and and outgoing message queue with a depth of one.
  *
index 759679d9ab96610bec00ec8935063e25f2cfae4d..6d1e760e2a0e21825a4ff54ee83ec7ccca0fa5a3 100644 (file)
@@ -130,7 +130,7 @@ do_fscc=0
        bfextu  %d2{#13,#3},%d0
 .endm
 
-| decode the 8bit diplacement from the brief extension word
+| decode the 8bit displacement from the brief extension word
 .macro fp_decode_disp8
        move.b  %d2,%d0
        ext.w   %d0
index a6b611f1da4390356cd3412672cc0b35398bb0e8..7d44e888134f9621d8056ac56fdf6916394619b7 100644 (file)
@@ -24,7 +24,7 @@ struct mm_struct;
 struct vm_area_struct;
 
 #define PAGE_NONE      __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
-                                _CACHE_CACHABLE_NONCOHERENT)
+                                _page_cachable_default)
 #define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
                                 _page_cachable_default)
 #define PAGE_COPY      __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
@@ -476,7 +476,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
        pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
        pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
        pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
-       pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
+       pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
        return pte;
 }
 #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
@@ -491,7 +491,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #else
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-       return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+       return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+                    (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
 }
 #endif
 
@@ -632,7 +633,8 @@ static inline struct page *pmd_page(pmd_t pmd)
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
-       pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+       pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
+                      (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
        return pmd;
 }
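
Both pte_modify() hunks apply the same fix: mask pgprot_val(newprot) with ~_PAGE_CHG_MASK so only protection bits come from the new value, while only the preserved bits (accessed/modified, plus _PAGE_HUGE for pmds) survive from the old entry. A toy illustration of what the unmasked OR could leak, using hypothetical bit assignments rather than the real MIPS layout:

#include <stdint.h>
#include <stdio.h>

#define P_PRESENT  0x01u              /* hypothetical layout */
#define P_WRITE    0x02u
#define P_ACCESSED 0x04u              /* preserved across modify */
#define P_MODIFIED 0x08u              /* preserved across modify */
#define P_CHG_MASK (P_ACCESSED | P_MODIFIED)

static uint32_t modify_buggy(uint32_t pte, uint32_t prot)
{
	return (pte & P_CHG_MASK) | prot;                 /* old code */
}

static uint32_t modify_fixed(uint32_t pte, uint32_t prot)
{
	return (pte & P_CHG_MASK) | (prot & ~P_CHG_MASK); /* new code */
}

int main(void)
{
	uint32_t pte  = P_PRESENT | P_ACCESSED;   /* clean page */
	uint32_t prot = P_PRESENT | P_MODIFIED;   /* stray preserved bit */

	/* buggy: 0xd (page silently marked dirty); fixed: 0x5 */
	printf("buggy %#x fixed %#x\n",
	       modify_buggy(pte, prot), modify_fixed(pte, prot));
	return 0;
}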
 
index 88a5ecaa157b5a2fd474f23759e11e2764f973e5..ab84c89c9e982129c3cde79792548c6078d42065 100644 (file)
@@ -230,6 +230,7 @@ extern unsigned long __kernel_virt_size;
 #define KERN_VIRT_SIZE  __kernel_virt_size
 extern struct page *vmemmap;
 extern unsigned long ioremap_bot;
+extern unsigned long pci_io_base;
 #endif /* __ASSEMBLY__ */
 
 #include <asm/book3s/64/hash.h>
index b5f73cb5eeb6e34b43fd75d5054f867128e792f7..d70101e1e25c1ac902996b2ba29bb8aa6f3f335e 100644 (file)
@@ -647,7 +647,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
                        pci_unlock_rescan_remove();
                }
        } else if (frozen_bus) {
-               eeh_pe_dev_traverse(pe, eeh_rmv_device, &rmv_data);
+               eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
        }
 
        /*
index 3759df52bd671d883c38aec23fe4b133e6c0d0f7..a5ae49a2dcc47a12163e6ec6ae50ff85d3bd2bb4 100644 (file)
@@ -47,7 +47,6 @@ static int __init pcibios_init(void)
 
        printk(KERN_INFO "PCI: Probing PCI hardware\n");
 
-       pci_io_base = ISA_IO_BASE;
        /* For now, override phys_mem_access_prot. If we need it,g
         * later, we may move that initialization to each ppc_md
         */
index e2f12cbcade9a49287c370204bcc71a4fb23b33a..0b93893424f5b2fe8246694e5e5e31c7b5324a0f 100644 (file)
@@ -1505,6 +1505,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
                current->thread.regs = regs - 1;
        }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       /*
+        * Clear any transactional state, we're exec()ing. The cause is
+        * not important as there will never be a recheckpoint so it's not
+        * user visible.
+        */
+       if (MSR_TM_SUSPENDED(mfmsr()))
+               tm_reclaim_current(0);
+#endif
+
        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
index bf8f34a5867088d1a2346d17b89e2daaabe365ab..b7019b559ddbffd817563e6f0d3ae5f3d134b2ca 100644 (file)
@@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
        std     r3, STK_PARAM(R3)(r1)
        SAVE_NVGPRS(r1)
 
-       /* We need to setup MSR for VSX register save instructions.  Here we
-        * also clear the MSR RI since when we do the treclaim, we won't have a
-        * valid kernel pointer for a while.  We clear RI here as it avoids
-        * adding another mtmsr closer to the treclaim.  This makes the region
-        * maked as non-recoverable wider than it needs to be but it saves on
-        * inserting another mtmsrd later.
-        */
+       /* We need to setup MSR for VSX register save instructions. */
        mfmsr   r14
        mr      r15, r14
        ori     r15, r15, MSR_FP
-       li      r16, MSR_RI
+       li      r16, 0
        ori     r16, r16, MSR_EE /* IRQs hard off */
        andc    r15, r15, r16
        oris    r15, r15, MSR_VEC@h
@@ -176,7 +170,17 @@ dont_backup_fp:
 1:     tdeqi   r6, 0
        EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
 
-       /* The moment we treclaim, ALL of our GPRs will switch
+       /* Clear MSR RI since we are about to change r1, EE is already off. */
+       li      r4, 0
+       mtmsrd  r4, 1
+
+       /*
+        * BE CAREFUL HERE:
+        * At this point we can't take an SLB miss since we have MSR_RI
+        * off. Load only to/from the stack/paca which are in SLB bolted regions
+        * until we turn MSR RI back on.
+        *
+        * The moment we treclaim, ALL of our GPRs will switch
         * to user register state.  (FPRs, CCR etc. also!)
         * Use an sprg and a tm_scratch in the PACA to shuffle.
         */
@@ -197,6 +201,11 @@ dont_backup_fp:
 
        /* Store the PPR in r11 and reset to decent value */
        std     r11, GPR11(r1)                  /* Temporary stash */
+
+       /* Reset MSR RI so we can take SLB faults again */
+       li      r11, MSR_RI
+       mtmsrd  r11, 1
+
        mfspr   r11, SPRN_PPR
        HMT_MEDIUM
 
@@ -397,11 +406,6 @@ restore_gprs:
        ld      r5, THREAD_TM_DSCR(r3)
        ld      r6, THREAD_TM_PPR(r3)
 
-       /* Clear the MSR RI since we are about to change R1.  EE is already off
-        */
-       li      r4, 0
-       mtmsrd  r4, 1
-
        REST_GPR(0, r7)                         /* GPR0 */
        REST_2GPRS(2, r7)                       /* GPR2-3 */
        REST_GPR(4, r7)                         /* GPR4 */
@@ -439,10 +443,33 @@ restore_gprs:
        ld      r6, _CCR(r7)
        mtcr    r6
 
-       REST_GPR(1, r7)                         /* GPR1 */
-       REST_GPR(5, r7)                         /* GPR5-7 */
        REST_GPR(6, r7)
-       ld      r7, GPR7(r7)
+
+       /*
+        * Store r1 and r5 on the stack so that we can access them
+        * after we clear MSR RI.
+        */
+
+       REST_GPR(5, r7)
+       std     r5, -8(r1)
+       ld      r5, GPR1(r7)
+       std     r5, -16(r1)
+
+       REST_GPR(7, r7)
+
+       /* Clear MSR RI since we are about to change r1. EE is already off */
+       li      r5, 0
+       mtmsrd  r5, 1
+
+       /*
+        * BE CAREFUL HERE:
+        * At this point we can't take an SLB miss since we have MSR_RI
+        * off. Load only to/from the stack/paca which are in SLB bolted regions
+        * until we turn MSR RI back on.
+        */
+
+       ld      r5, -8(r1)
+       ld      r1, -16(r1)
 
        /* Commit register state as checkpointed state: */
        TRECHKPT
index 5b22ba0b58bc9e5168a4d12022bb6c176ab3a14c..2971ea18c768bcda67c36874b4fba274f607abc5 100644 (file)
@@ -922,6 +922,10 @@ void __init hash__early_init_mmu(void)
        vmemmap = (struct page *)H_VMEMMAP_BASE;
        ioremap_bot = IOREMAP_BASE;
 
+#ifdef CONFIG_PCI
+       pci_io_base = ISA_IO_BASE;
+#endif
+
        /* Initialize the MMU Hash table and create the linear mapping
         * of memory. Has to be done before SLB initialization as this is
         * currently where the page size encoding is obtained.
index e58707deef5c2f90530d088dbc786417d191af04..7931e1496f0d59d2ab5c015a48824d15e5fb3f05 100644 (file)
@@ -328,6 +328,11 @@ void __init radix__early_init_mmu(void)
        __vmalloc_end = RADIX_VMALLOC_END;
        vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
        ioremap_bot = IOREMAP_BASE;
+
+#ifdef CONFIG_PCI
+       pci_io_base = ISA_IO_BASE;
+#endif
+
        /*
         * For now radix also use the same frag size
         */
index 5e04f3cbd320d78e963c7ae18639c94d4b83952c..8ae236b0f80b35b127d3b87d64e1abff23d437c8 100644 (file)
@@ -22,7 +22,7 @@ static inline int test_fp_ctl(u32 fpc)
                "       la      %0,0\n"
                "1:\n"
                EX_TABLE(0b,1b)
-               : "=d" (rc), "=d" (orig_fpc)
+               : "=d" (rc), "=&d" (orig_fpc)
                : "d" (fpc), "0" (-EINVAL));
        return rc;
 }
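
The one-character change here — "=d" to "=&d" — marks orig_fpc as an earlyclobber output: the asm writes it before it has finished consuming its inputs, so the compiler must not allocate it to the same register as fpc or the tied -EINVAL operand. A generic illustration of the constraint, in x86 AT&T syntax rather than s390 code:

static inline int double_of(int in)
{
	int tmp;

	/* Without '&', the compiler may pick the same register for
	 * %0 and %1; the mov would then clobber 'in' before the add
	 * reads it, so the add would double the wrong value. */
	asm("mov %1, %0\n\t"
	    "add %1, %0"
	    : "=&r" (tmp)    /* earlyclobber: no overlap with inputs */
	    : "r" (in));
	return tmp;
}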
index f20abdb5630adceaa75690907ddf7cdedc098146..d14069d4b88dcd1a38fe2a3ca309cf8723909bf3 100644 (file)
@@ -2064,12 +2064,5 @@ void s390_reset_system(void)
        S390_lowcore.program_new_psw.addr =
                (unsigned long) s390_base_pgm_handler;
 
-       /*
-        * Clear subchannel ID and number to signal new kernel that no CCW or
-        * SCSI IPL has been done (for kexec and kdump)
-        */
-       S390_lowcore.subchannel_id = 0;
-       S390_lowcore.subchannel_nr = 0;
-
        do_reset_calls();
 }
index 33787ee817f0cdaad78814849b0aad2dc7e2b407..91eac39625beb1b85fb3651a1b1758d296779c9a 100644 (file)
@@ -263,7 +263,7 @@ static bool check_hw_exists(void)
 
 msr_fail:
        pr_cont("Broken PMU hardware detected, using software events only.\n");
-       pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+       printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
                boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
                reg, val_new);
 
@@ -2319,7 +2319,7 @@ void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        struct stack_frame frame;
-       const void __user *fp;
+       const unsigned long __user *fp;
 
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* TODO: We don't support guest os callchain now */
@@ -2332,7 +2332,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
        if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
                return;
 
-       fp = (void __user *)regs->bp;
+       fp = (unsigned long __user *)regs->bp;
 
        perf_callchain_store(entry, regs->ip);
 
@@ -2345,16 +2345,17 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
        pagefault_disable();
        while (entry->nr < entry->max_stack) {
                unsigned long bytes;
+
                frame.next_frame             = NULL;
                frame.return_address = 0;
 
-               if (!access_ok(VERIFY_READ, fp, 16))
+               if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
                        break;
 
-               bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
+               bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
                if (bytes != 0)
                        break;
-               bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
+               bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
                if (bytes != 0)
                        break;
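
Re-typing fp as unsigned long __user * lets the bounds check and both copies be written as sizeof(*fp), so the walk is word-sized on both 32-bit and 64-bit kernels instead of hard-coding 8 and 16. The record copied at each step is the usual frame-pointer pair; below is a simplified sketch of the walk — no NMI-safe copy or fault handling, and the monotonicity check is an assumption of this sketch, not the kernel loop:

#include <stdio.h>

struct stack_frame {                      /* shape of one record */
	struct stack_frame *next_frame;   /* at fp + 0 */
	unsigned long return_address;     /* at fp + 1 word */
};

static void walk_user_stack(const unsigned long *fp, int max)
{
	while (max-- && fp) {
		unsigned long next = fp[0];   /* saved frame pointer */
		unsigned long ret  = fp[1];   /* return address */

		printf("ip %#lx\n", ret);
		if (next <= (unsigned long)fp)
			break;                /* frames must move up */
		fp = (const unsigned long *)next;
	}
}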
 
index 3660b2cf245ad7d22eaf0f9d1cf2602d270b62c2..06c2baa518144336133f775299b086d0ac4306ec 100644 (file)
@@ -1,8 +1,8 @@
 obj-$(CONFIG_CPU_SUP_INTEL)            += core.o bts.o cqm.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += ds.o knc.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += lbr.o p4.o p6.o pt.o
-obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL)   += intel-rapl.o
-intel-rapl-objs                                := rapl.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL)   += intel-rapl-perf.o
+intel-rapl-perf-objs                   := rapl.o
 obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
 intel-uncore-objs                      := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
 obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o
index 7c666958a6250354aa204d24e73f94670264ffee..9b4f9d3ce4650cc7c38632b73c10e5020c0695a3 100644 (file)
@@ -115,6 +115,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
        INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 
+       /*
+        * When HT is off these events can only run on the bottom 4 counters
+        * When HT is on, they are impacted by the HT bug and require EXCL access
+        */
        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -139,6 +143,10 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 
+       /*
+        * When HT is off these events can only run on the bottom 4 counters
+        * When HT is on, they are impacted by the HT bug and require EXCL access
+        */
        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -182,6 +190,16 @@ struct event_constraint intel_skl_event_constraints[] = {
        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
        INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),    /* INST_RETIRED.PREC_DIST */
+
+       /*
+        * when HT is off, these can only run on the bottom 4 counters
+        */
+       INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xc6, 0xf),      /* FRONTEND_RETIRED.* */
+
        EVENT_CONSTRAINT_END
 };
 
@@ -250,6 +268,10 @@ static struct event_constraint intel_hsw_event_constraints[] = {
        /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
 
+       /*
+        * When HT is off these events can only run on the bottom 4 counters
+        * When HT is on, they are impacted by the HT bug and require EXCL access
+        */
        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -264,6 +286,13 @@ struct event_constraint intel_bdw_event_constraints[] = {
        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
        INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),        /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
+       /*
+        * when HT is off, these can only run on the bottom 4 counters
+        */
+       INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
        EVENT_CONSTRAINT_END
 };
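
In these tables the second argument is a bitmask of the general-purpose counters an event may be scheduled on, so 0xf pins the MEM_* and FRONTEND_RETIRED events to counters 0-3 — the restriction the new comments describe for the HT-off case. A minimal sketch of what such a mask expresses (the real scheduler works on struct event_constraint with weights, not a helper like this):

#include <stdbool.h>

static bool counter_allowed(unsigned int idx, unsigned long cntr_mask)
{
	/* bit i set => the event may run on general counter i */
	return idx < 8 * sizeof(cntr_mask) && (cntr_mask & (1UL << idx));
}

/* counter_allowed(3, 0xf) -> true;  counter_allowed(4, 0xf) -> false */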
 
index 4a413485f9eb8ef58ec71c77ff2594f4300c8ea6..c64b1e9c5d1a30d916be2d944a3e94134b240fb0 100644 (file)
 #define X86_BUG_FXSAVE_LEAK    X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
 #define X86_BUG_CLFLUSH_MONITOR        X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
 #define X86_BUG_SYSRET_SS_ATTRS        X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
-#define X86_BUG_NULL_SEG       X86_BUG(9) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE   X86_BUG(10) /* SWAPGS without input dep on GS */
-
-
 #ifdef CONFIG_X86_32
 /*
  * 64-bit kernels don't use X86_BUG_ESPFIX.  Make the define conditional
  */
 #define X86_BUG_ESPFIX         X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
 #endif
+#define X86_BUG_NULL_SEG       X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE   X86_BUG(11) /* SWAPGS without input dep on GS */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index fdcc04020636e33269d7a756162cfe852f509823..7c1c89598688bab2edc26dce70ab722e5f79b123 100644 (file)
@@ -68,30 +68,23 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
        return product;
 }
 
-static __always_inline
-u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
-{
-       u64 delta = rdtsc_ordered() - src->tsc_timestamp;
-       return pvclock_scale_delta(delta, src->tsc_to_system_mul,
-                                  src->tsc_shift);
-}
-
 static __always_inline
 unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
                               cycle_t *cycles, u8 *flags)
 {
        unsigned version;
-       cycle_t ret, offset;
-       u8 ret_flags;
+       cycle_t offset;
+       u64 delta;
 
        version = src->version;
+       /* Make the latest version visible */
+       smp_rmb();
 
-       offset = pvclock_get_nsec_offset(src);
-       ret = src->system_time + offset;
-       ret_flags = src->flags;
-
-       *cycles = ret;
-       *flags = ret_flags;
+       delta = rdtsc_ordered() - src->tsc_timestamp;
+       offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+                                  src->tsc_shift);
+       *cycles = src->system_time + offset;
+       *flags = src->flags;
        return version;
 }
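
The rewrite folds pvclock_get_nsec_offset() into its only caller and, more importantly, inserts an smp_rmb() so the version read is ordered before the data reads; the callers then finish the seqcount-style protocol by re-checking the version. A self-contained sketch of the full read loop, using a compiler barrier as a stand-in for smp_rmb() (sufficient on x86, where loads are not reordered with other loads):

#include <stdint.h>

struct time_info {               /* stand-in for pvclock_vcpu_time_info */
	volatile uint32_t version;
	uint64_t system_time;
};

#define rmb() __asm__ __volatile__("" ::: "memory")

static uint64_t read_consistent(const struct time_info *src)
{
	uint32_t version;
	uint64_t t;

	do {
		version = src->version;   /* 1. sample version */
		rmb();                    /* 2. version before data */
		t = src->system_time;     /* 3. read the payload */
		rmb();                    /* 4. data before re-check */
	} while ((version & 1) || version != src->version);
	/* odd: writer mid-update; changed: payload may be torn */
	return t;
}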
 
index a147e676fc7b3439d156534472d63be5c2d10f4a..e991d5c8bb3a1b1d21c08f198a6d38c97f45effb 100644 (file)
@@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
        while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
                i++;
 
-       if (i == 0)
-               return 0;
+       if (!i)
+               return -ENODEV;
 
        nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
index 757390eb562b43fc53a361b3794717b70c9ac364..de7501edb21ce5de48db107191cd87a7c2515c65 100644 (file)
 
 #include <linux/pci.h>
 #include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/pci_ids.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
 #include <drm/i915_drm.h>
 #include <asm/pci-direct.h>
 #include <asm/dma.h>
@@ -21,6 +25,9 @@
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/irq_remapping.h>
+#include <asm/early_ioremap.h>
+
+#define dev_err(msg)  pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg)
 
 static void __init fix_hypertransport_config(int num, int slot, int func)
 {
@@ -75,6 +82,13 @@ static void __init nvidia_bugs(int num, int slot, int func)
 {
 #ifdef CONFIG_ACPI
 #ifdef CONFIG_X86_IO_APIC
+       /*
+        * Only applies to Nvidia root ports (bus 0) and not to
+        * Nvidia graphics cards with PCI ports on secondary buses.
+        */
+       if (num)
+               return;
+
        /*
         * All timer overrides on Nvidia are
         * wrong unless HPET is enabled.
@@ -574,6 +588,61 @@ static void __init force_disable_hpet(int num, int slot, int func)
 #endif
 }
 
+#define BCM4331_MMIO_SIZE      16384
+#define BCM4331_PM_CAP         0x40
+#define bcma_aread32(reg)      ioread32(mmio + 1 * BCMA_CORE_SIZE + reg)
+#define bcma_awrite32(reg, val)        iowrite32(val, mmio + 1 * BCMA_CORE_SIZE + reg)
+
+static void __init apple_airport_reset(int bus, int slot, int func)
+{
+       void __iomem *mmio;
+       u16 pmcsr;
+       u64 addr;
+       int i;
+
+       if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc."))
+               return;
+
+       /* Card may have been put into PCI_D3hot by grub quirk */
+       pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+
+       if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+               pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+               write_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL, pmcsr);
+               mdelay(10);
+
+               pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+               if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+                       dev_err("Cannot power up Apple AirPort card\n");
+                       return;
+               }
+       }
+
+       addr  =      read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
+       addr |= (u64)read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1) << 32;
+       addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+       mmio = early_ioremap(addr, BCM4331_MMIO_SIZE);
+       if (!mmio) {
+               dev_err("Cannot iomap Apple AirPort card\n");
+               return;
+       }
+
+       pr_info("Resetting Apple AirPort card (left enabled by EFI)\n");
+
+       for (i = 0; bcma_aread32(BCMA_RESET_ST) && i < 30; i++)
+               udelay(10);
+
+       bcma_awrite32(BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+       bcma_aread32(BCMA_RESET_CTL);
+       udelay(1);
+
+       bcma_awrite32(BCMA_RESET_CTL, 0);
+       bcma_aread32(BCMA_RESET_CTL);
+       udelay(10);
+
+       early_iounmap(mmio, BCM4331_MMIO_SIZE);
+}
 
 #define QFLAG_APPLY_ONCE       0x1
 #define QFLAG_APPLIED          0x2
@@ -587,12 +656,6 @@ struct chipset {
        void (*f)(int num, int slot, int func);
 };
 
-/*
- * Only works for devices on the root bus. If you add any devices
- * not on bus 0 readd another loop level in early_quirks(). But
- * be careful because at least the Nvidia quirk here relies on
- * only matching on bus 0.
- */
 static struct chipset early_qrk[] __initdata = {
        { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
          PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
@@ -622,9 +685,13 @@ static struct chipset early_qrk[] __initdata = {
         */
        { PCI_VENDOR_ID_INTEL, 0x0f00,
                PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+       { PCI_VENDOR_ID_BROADCOM, 0x4331,
+         PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
        {}
 };
 
+static void __init early_pci_scan_bus(int bus);
+
 /**
  * check_dev_quirk - apply early quirks to a given PCI device
  * @num: bus number
@@ -633,7 +700,7 @@ static struct chipset early_qrk[] __initdata = {
  *
  * Check the vendor & device ID against the early quirks table.
  *
- * If the device is single function, let early_quirks() know so we don't
+ * If the device is single function, let early_pci_scan_bus() know so we don't
  * poke at this device again.
  */
 static int __init check_dev_quirk(int num, int slot, int func)
@@ -642,6 +709,7 @@ static int __init check_dev_quirk(int num, int slot, int func)
        u16 vendor;
        u16 device;
        u8 type;
+       u8 sec;
        int i;
 
        class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
@@ -669,25 +737,36 @@ static int __init check_dev_quirk(int num, int slot, int func)
 
        type = read_pci_config_byte(num, slot, func,
                                    PCI_HEADER_TYPE);
+
+       if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
+               sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS);
+               if (sec > num)
+                       early_pci_scan_bus(sec);
+       }
+
        if (!(type & 0x80))
                return -1;
 
        return 0;
 }
 
-void __init early_quirks(void)
+static void __init early_pci_scan_bus(int bus)
 {
        int slot, func;
 
-       if (!early_pci_allowed())
-               return;
-
        /* Poor man's PCI discovery */
-       /* Only scan the root bus */
        for (slot = 0; slot < 32; slot++)
                for (func = 0; func < 8; func++) {
                        /* Only probe function 0 on single fn devices */
-                       if (check_dev_quirk(0, slot, func))
+                       if (check_dev_quirk(bus, slot, func))
                                break;
                }
 }
+
+void __init early_quirks(void)
+{
+       if (!early_pci_allowed())
+               return;
+
+       early_pci_scan_bus(0);
+}
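
early_pci_scan_bus() probes 32 device slots of up to 8 functions each by direct port I/O, and now recurses through bridges via their secondary bus number. The read_pci_config*() helpers it relies on use configuration mechanism #1: encode the address into port 0xCF8, then read the data from 0xCFC. A sketch of that encoding (the kernel's helpers live in arch/x86/pci/early.c):

#include <stdint.h>

static inline uint32_t pci_cf8(unsigned bus, unsigned slot,
			       unsigned func, unsigned off)
{
	return 0x80000000u |          /* enable bit */
	       (bus  << 16) |         /* 8-bit bus number */
	       (slot << 11) |         /* 5-bit device (0-31) */
	       (func << 8)  |         /* 3-bit function (0-7) */
	       (off & 0xfc);          /* dword-aligned register */
}

/* outl(pci_cf8(bus, slot, func, reg), 0xCF8);
 * val = inl(0xCFC);            -- read_pci_config() equivalent */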
index 99bfc025111d3dd62bc7c1753d126e97fb4e48ac..06c58ce46762ed9165cd3317292bbc57efa7055c 100644 (file)
@@ -61,11 +61,16 @@ void pvclock_resume(void)
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 {
        unsigned version;
-       cycle_t ret;
        u8 flags;
 
        do {
-               version = __pvclock_read_cycles(src, &ret, &flags);
+               version = src->version;
+               /* Make the latest version visible */
+               smp_rmb();
+
+               flags = src->flags;
+               /* Make sure that the version double-check is last. */
+               smp_rmb();
        } while ((src->version & 1) || version != src->version);
 
        return flags & valid_flags;
@@ -80,6 +85,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 
        do {
                version = __pvclock_read_cycles(src, &ret, &flags);
+               /* Make sure that the version double-check is last. */
+               smp_rmb();
        } while ((src->version & 1) || version != src->version);
 
        if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
index bbb5b283ff63a9285ddcedc113a91dbf22859bbb..a397200281c119aabb283fce45c4a921f3f4cd24 100644 (file)
@@ -1310,7 +1310,8 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 
        /* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
        if (guest_tsc < tsc_deadline)
-               __delay(tsc_deadline - guest_tsc);
+               __delay(min(tsc_deadline - guest_tsc,
+                       nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
 }
 
 static void start_apic_timer(struct kvm_lapic *apic)
index 003618e324ce4bd584458ffbf3b5de6d32635d4e..64a79f271276a025ea76759189cddfbe967ae1ed 100644 (file)
@@ -6671,7 +6671,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 
        /* Checks for #GP/#SS exceptions. */
        exn = false;
-       if (is_protmode(vcpu)) {
+       if (is_long_mode(vcpu)) {
+               /* Long mode: #GP(0)/#SS(0) if the memory address is in a
+                * non-canonical form. This is the only check on the memory
+                * destination for long mode!
+                */
+               exn = is_noncanonical_address(*ret);
+       } else if (is_protmode(vcpu)) {
                /* Protected mode: apply checks for segment validity in the
                 * following order:
                 * - segment type check (#GP(0) may be thrown)
@@ -6688,17 +6694,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
                         * execute-only code segment
                         */
                        exn = ((s.type & 0xa) == 8);
-       }
-       if (exn) {
-               kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
-               return 1;
-       }
-       if (is_long_mode(vcpu)) {
-               /* Long mode: #GP(0)/#SS(0) if the memory address is in a
-                * non-canonical form. This is an only check for long mode.
-                */
-               exn = is_noncanonical_address(*ret);
-       } else if (is_protmode(vcpu)) {
+               if (exn) {
+                       kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+                       return 1;
+               }
                /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
                 */
                exn = (s.unusable != 0);
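
The reordering makes long mode take only the canonicality check, and keeps the protected-mode #GP for bad segment types inside its own branch instead of falling through to a second is_protmode() test. On the 48-bit virtual-address parts this code targets, "canonical" means bits 63:48 replicate bit 47; a sketch of the check (LA57 parts would need a wider variant):

#include <stdint.h>
#include <stdbool.h>

static inline uint64_t get_canonical(uint64_t la)
{
	/* sign-extend bit 47 through bits 63:48 */
	return (uint64_t)((int64_t)(la << 16) >> 16);
}

static inline bool is_noncanonical(uint64_t la)
{
	return get_canonical(la) != la;   /* e.g. 0x0000800000000000 */
}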
index 902d9da123929be3b8a02e2acc3f315d71c80c2a..7da5dd2057a928fd3eebed2052a6d89ee0750924 100644 (file)
@@ -1244,12 +1244,6 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
 static unsigned long max_tsc_khz;
 
-static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
-{
-       return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
-                                  vcpu->arch.virtual_tsc_shift);
-}
-
 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
 {
        u64 v = (u64)khz * (1000000 + ppm);
index 7ce3634ab5fe6189a95163d8cd9781a923d21727..a82ca466b62e862c88a64b89bb602139fff3ab42 100644 (file)
@@ -2,6 +2,7 @@
 #define ARCH_X86_KVM_X86_H
 
 #include <linux/kvm_host.h>
+#include <asm/pvclock.h>
 #include "kvm_cache_regs.h"
 
 #define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL
@@ -195,6 +196,12 @@ extern unsigned int lapic_timer_advance_ns;
 
 extern struct static_key kvm_no_apic_vcpu;
 
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
+{
+       return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
+                                  vcpu->arch.virtual_tsc_shift);
+}
+
 /* Same "calling convention" as do_div:
  * - divide (n << 32) by base
  * - put result in n
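
Hoisting nsec_to_cycles() into this header is what lets the lapic.c hunk earlier in the diff bound its busy-wait by nsec_to_cycles(vcpu, lapic_timer_advance_ns). The underlying pvclock_scale_delta() is 32.32 fixed-point scaling: pre-shift the delta, multiply by the fractional multiplier, keep the high 64 bits. A sketch using the compiler's 128-bit type for brevity (the kernel builds the same result from 32x32->64 partial products so 32-bit hosts work too):

#include <stdint.h>

static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	/* (delta * mul_frac) / 2^32, i.e. mul_frac is a 32.32 factor */
	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}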
index 1b1110fa00570e0d242926ca8f06adc37db518a5..0493c17b8a516f4212bc21fe5e32d0bce466c24f 100644 (file)
@@ -54,8 +54,8 @@ static int kasan_die_handler(struct notifier_block *self,
                             void *data)
 {
        if (val == DIE_GPF) {
-               pr_emerg("CONFIG_KASAN_INLINE enabled");
-               pr_emerg("GPF could be caused by NULL-ptr deref or user memory access");
+               pr_emerg("CONFIG_KASAN_INLINE enabled\n");
+               pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
 }
index b2a4e2a61f6b8f41775c47b0aed020a6441e5d66..3cd69832d7f4c6f3743bbb3a190c39672c89461d 100644 (file)
@@ -396,6 +396,7 @@ int __init pci_acpi_init(void)
                return -ENODEV;
 
        printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
+       acpi_irq_penalty_init();
        pcibios_enable_irq = acpi_pci_irq_enable;
        pcibios_disable_irq = acpi_pci_irq_disable;
        x86_init.pci.init_irq = x86_init_noop;
index 009947d419a61ae2fc41f58ad9d85ece1555fec2..f2b5e6a5cf956102905f64462db824a8a355cca5 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/mtrr.h>
 #include <asm/sections.h>
 #include <asm/suspend.h>
+#include <asm/tlbflush.h>
 
 /* Defined in hibernate_asm_64.S */
 extern asmlinkage __visible int restore_image(void);
@@ -28,6 +29,7 @@ extern asmlinkage __visible int restore_image(void);
  * kernel's text (this value is passed in the image header).
  */
 unsigned long restore_jump_address __visible;
+unsigned long jump_address_phys;
 
 /*
  * Value of the cr3 register from before the hibernation (this value is passed
@@ -37,7 +39,43 @@ unsigned long restore_cr3 __visible;
 
 pgd_t *temp_level4_pgt __visible;
 
-void *relocated_restore_code __visible;
+unsigned long relocated_restore_code __visible;
+
+static int set_up_temporary_text_mapping(void)
+{
+       pmd_t *pmd;
+       pud_t *pud;
+
+       /*
+        * The new mapping only has to cover the page containing the image
+        * kernel's entry point (jump_address_phys), because the switch over to
+        * it is carried out by relocated code running from a page allocated
+        * specifically for this purpose and covered by the identity mapping, so
+        * the temporary kernel text mapping is only needed for the final jump.
+        * Moreover, in that mapping the virtual address of the image kernel's
+        * entry point must be the same as its virtual address in the image
+        * kernel (restore_jump_address), so the image kernel's
+        * restore_registers() code doesn't find itself in a different area of
+        * the virtual address space after switching over to the original page
+        * tables used by the image kernel.
+        */
+       pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+       if (!pud)
+               return -ENOMEM;
+
+       pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+       if (!pmd)
+               return -ENOMEM;
+
+       set_pmd(pmd + pmd_index(restore_jump_address),
+               __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
+       set_pud(pud + pud_index(restore_jump_address),
+               __pud(__pa(pmd) | _KERNPG_TABLE));
+       set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
+               __pgd(__pa(pud) | _KERNPG_TABLE));
+
+       return 0;
+}
 
 static void *alloc_pgt_page(void *context)
 {
@@ -59,9 +97,10 @@ static int set_up_temporary_mappings(void)
        if (!temp_level4_pgt)
                return -ENOMEM;
 
-       /* It is safe to reuse the original kernel mapping */
-       set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
-               init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+       /* Prepare a temporary mapping for the kernel text */
+       result = set_up_temporary_text_mapping();
+       if (result)
+               return result;
 
        /* Set up the direct mapping from scratch */
        for (i = 0; i < nr_pfn_mapped; i++) {
@@ -78,19 +117,50 @@ static int set_up_temporary_mappings(void)
        return 0;
 }
 
+static int relocate_restore_code(void)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+
+       relocated_restore_code = get_safe_page(GFP_ATOMIC);
+       if (!relocated_restore_code)
+               return -ENOMEM;
+
+       memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
+
+       /* Make the page containing the relocated code executable */
+       pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
+       pud = pud_offset(pgd, relocated_restore_code);
+       if (pud_large(*pud)) {
+               set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
+       } else {
+               pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
+
+               if (pmd_large(*pmd)) {
+                       set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
+               } else {
+                       pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
+
+                       set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
+               }
+       }
+       __flush_tlb_all();
+
+       return 0;
+}
+
 int swsusp_arch_resume(void)
 {
        int error;
 
        /* We have got enough memory and from now on we cannot recover */
-       if ((error = set_up_temporary_mappings()))
+       error = set_up_temporary_mappings();
+       if (error)
                return error;
 
-       relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
-       if (!relocated_restore_code)
-               return -ENOMEM;
-       memcpy(relocated_restore_code, &core_restore_code,
-              &restore_registers - &core_restore_code);
+       error = relocate_restore_code();
+       if (error)
+               return error;
 
        restore_image();
        return 0;
@@ -109,11 +179,12 @@ int pfn_is_nosave(unsigned long pfn)
 
 struct restore_data_record {
        unsigned long jump_address;
+       unsigned long jump_address_phys;
        unsigned long cr3;
        unsigned long magic;
 };
 
-#define RESTORE_MAGIC  0x0123456789ABCDEFUL
+#define RESTORE_MAGIC  0x123456789ABCDEF0UL
 
 /**
  *     arch_hibernation_header_save - populate the architecture specific part
@@ -126,7 +197,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
 
        if (max_size < sizeof(struct restore_data_record))
                return -EOVERFLOW;
-       rdr->jump_address = restore_jump_address;
+       rdr->jump_address = (unsigned long)&restore_registers;
+       rdr->jump_address_phys = __pa_symbol(&restore_registers);
        rdr->cr3 = restore_cr3;
        rdr->magic = RESTORE_MAGIC;
        return 0;
@@ -142,6 +214,7 @@ int arch_hibernation_header_restore(void *addr)
        struct restore_data_record *rdr = addr;
 
        restore_jump_address = rdr->jump_address;
+       jump_address_phys = rdr->jump_address_phys;
        restore_cr3 = rdr->cr3;
        return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
 }
index 4400a43b9e28f20aaac68c1a20091e683e31b799..3177c2bc26f63e9fe1bb82739be9c0c2e406f00f 100644 (file)
@@ -44,9 +44,6 @@ ENTRY(swsusp_arch_suspend)
        pushfq
        popq    pt_regs_flags(%rax)
 
-       /* save the address of restore_registers */
-       movq    $restore_registers, %rax
-       movq    %rax, restore_jump_address(%rip)
        /* save cr3 */
        movq    %cr3, %rax
        movq    %rax, restore_cr3(%rip)
@@ -57,31 +54,34 @@ ENTRY(swsusp_arch_suspend)
 ENDPROC(swsusp_arch_suspend)
 
 ENTRY(restore_image)
-       /* switch to temporary page tables */
-       movq    $__PAGE_OFFSET, %rdx
-       movq    temp_level4_pgt(%rip), %rax
-       subq    %rdx, %rax
-       movq    %rax, %cr3
-       /* Flush TLB */
-       movq    mmu_cr4_features(%rip), %rax
-       movq    %rax, %rdx
-       andq    $~(X86_CR4_PGE), %rdx
-       movq    %rdx, %cr4;  # turn off PGE
-       movq    %cr3, %rcx;  # flush TLB
-       movq    %rcx, %cr3;
-       movq    %rax, %cr4;  # turn PGE back on
-
        /* prepare to jump to the image kernel */
-       movq    restore_jump_address(%rip), %rax
-       movq    restore_cr3(%rip), %rbx
+       movq    restore_jump_address(%rip), %r8
+       movq    restore_cr3(%rip), %r9
+
+       /* prepare to switch to temporary page tables */
+       movq    temp_level4_pgt(%rip), %rax
+       movq    mmu_cr4_features(%rip), %rbx
 
        /* prepare to copy image data to their original locations */
        movq    restore_pblist(%rip), %rdx
+
+       /* jump to relocated restore code */
        movq    relocated_restore_code(%rip), %rcx
        jmpq    *%rcx
 
        /* code below has been relocated to a safe page */
 ENTRY(core_restore_code)
+       /* switch to temporary page tables */
+       movq    $__PAGE_OFFSET, %rcx
+       subq    %rcx, %rax
+       movq    %rax, %cr3
+       /* flush TLB */
+       movq    %rbx, %rcx
+       andq    $~(X86_CR4_PGE), %rcx
+       movq    %rcx, %cr4;  # turn off PGE
+       movq    %cr3, %rcx;  # flush TLB
+       movq    %rcx, %cr3;
+       movq    %rbx, %cr4;  # turn PGE back on
 .Lloop:
        testq   %rdx, %rdx
        jz      .Ldone
@@ -96,24 +96,17 @@ ENTRY(core_restore_code)
        /* progress to the next pbe */
        movq    pbe_next(%rdx), %rdx
        jmp     .Lloop
+
 .Ldone:
        /* jump to the restore_registers address from the image header */
-       jmpq    *%rax
-       /*
-        * NOTE: This assumes that the boot kernel's text mapping covers the
-        * image kernel's page containing restore_registers and the address of
-        * this page is the same as in the image kernel's text mapping (it
-        * should always be true, because the text mapping is linear, starting
-        * from 0, and is supposed to cover the entire kernel text for every
-        * kernel).
-        *
-        * code below belongs to the image kernel
-        */
+       jmpq    *%r8
 
+        /* code below belongs to the image kernel */
+       .align PAGE_SIZE
 ENTRY(restore_registers)
        FRAME_BEGIN
        /* go back to the original page tables */
-       movq    %rbx, %cr3
+       movq    %r9, %cr3
 
        /* Flush TLB, including "global" things (vmalloc) */
        movq    mmu_cr4_features(%rip), %rax
index cc7800e9eb441e2b7737a152f0dbb60182821408..01b8116298a13b5463e7969ce66c0b037bcfccd5 100644 (file)
@@ -150,8 +150,10 @@ static int get_task_ioprio(struct task_struct *p)
        if (ret)
                goto out;
        ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+       task_lock(p);
        if (p->io_context)
                ret = p->io_context->ioprio;
+       task_unlock(p);
 out:
        return ret;
 }
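
Without the lock, p->io_context can be detached and freed by the exiting task between the NULL check and the dereference; task_lock(p) pairs with the lock the exit path takes before detaching. A user-space analogy of the pattern — not the block-layer code itself, which refcounts the context via put_io_context():

#include <pthread.h>
#include <stdlib.h>

struct task {
	pthread_mutex_t alloc_lock;   /* what task_lock() takes */
	int *io_context;              /* detached by the exit path */
};

/* reader side -- mirrors the fixed get_task_ioprio() */
int read_ioprio(struct task *p, int fallback)
{
	int ret = fallback;

	pthread_mutex_lock(&p->alloc_lock);
	if (p->io_context)
		ret = *p->io_context;   /* safe: cannot be freed here */
	pthread_mutex_unlock(&p->alloc_lock);
	return ret;
}

/* exit side -- mirrors exit_io_context(): detach under the lock,
 * free only after dropping it */
void task_exit_io_context(struct task *p)
{
	int *ioc;

	pthread_mutex_lock(&p->alloc_lock);
	ioc = p->io_context;
	p->io_context = NULL;
	pthread_mutex_unlock(&p->alloc_lock);
	free(ioc);
}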
index 6a76d5c70ef6e1ddb742bfc94671a499184c891d..9492e1c22d3891417d3e42baf2e99661041c1135 100644 (file)
@@ -124,5 +124,10 @@ int mscode_note_digest(void *context, size_t hdrlen,
        struct pefile_context *ctx = context;
 
        ctx->digest = kmemdup(value, vlen, GFP_KERNEL);
-       return ctx->digest ? 0 : -ENOMEM;
+       if (!ctx->digest)
+               return -ENOMEM;
+
+       ctx->digest_len = vlen;
+
+       return 0;
 }
index 44b746e9df1b4110e6f8da4ac5b23419cb63feac..2ffd69769466082eaf55cdfe71fb67704e0364af 100644 (file)
@@ -227,7 +227,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
                                if (asymmetric_key_id_same(p->id, auth))
                                        goto found_issuer_check_skid;
                        }
-               } else {
+               } else if (sig->auth_ids[1]) {
                        auth = sig->auth_ids[1];
                        pr_debug("- want %*phN\n", auth->len, auth->data);
                        for (p = pkcs7->certs; p; p = p->next) {
index ac4bddf669de2195bce0864a28308031602245da..19d1afb9890f660e43ee95261cf0a703e44f92c6 100644 (file)
@@ -87,7 +87,7 @@ int restrict_link_by_signature(struct key *trust_keyring,
 
        sig = payload->data[asym_auth];
        if (!sig->auth_ids[0] && !sig->auth_ids[1])
-               return 0;
+               return -ENOKEY;
 
        if (ca_keyid && !asymmetric_key_id_partial(sig->auth_ids[1], ca_keyid))
                return -EPERM;
index 43fe85f20d577b4f3d1bbd6576b6d752bc578531..7097a3395b2529fd123b2c0b14bcce6992fdee49 100644 (file)
@@ -455,6 +455,7 @@ static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
        [CRYPTO_MSG_NEWALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
        [CRYPTO_MSG_DELALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
        [CRYPTO_MSG_UPDATEALG   - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+       [CRYPTO_MSG_GETALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
        [CRYPTO_MSG_DELRNG      - CRYPTO_MSG_BASE] = 0,
 };
 
index ead8dc0d084e749e35733abc164f1e209aca3f6f..8ba426635b1b39631824f7bb2b1f2e3a056bafa4 100644 (file)
@@ -102,10 +102,10 @@ struct pkcs1pad_inst_ctx {
 };
 
 struct pkcs1pad_request {
-       struct akcipher_request child_req;
-
        struct scatterlist in_sg[3], out_sg[2];
        uint8_t *in_buf, *out_buf;
+
+       struct akcipher_request child_req;
 };
 
 static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
index 1f4128487dd4be1b342df522e010de6c29c6756b..dee86925a9a107f028ef14adf110eb583500c0c7 100644 (file)
@@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
        crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
        ret = n;
 out:
-       acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
+       acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
        return ret;
 }
 
@@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
        crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
        ret = n;
 out:
-       acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
+       acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
        return n;
 }
 
index 21932d640a41f6f5b060b8fcb64fbc2d9ac131ab..a1d177d58254cd8e201836481a51b809ec98c628 100644 (file)
@@ -108,9 +108,7 @@ acpi_ex_add_table(u32 table_index,
 
        /* Add the table to the namespace */
 
-       acpi_ex_exit_interpreter();
        status = acpi_ns_load_table(table_index, parent_node);
-       acpi_ex_enter_interpreter();
        if (ACPI_FAILURE(status)) {
                acpi_ut_remove_reference(obj_desc);
                *ddb_handle = NULL;
index 1783cd7e14467b8f43e9f087884b3b847e6b5492..f631a47724f05332644a54e290d9667b4b326010 100644 (file)
@@ -47,7 +47,6 @@
 #include "acparser.h"
 #include "acdispat.h"
 #include "actables.h"
-#include "acinterp.h"
 
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsparse")
@@ -171,8 +170,6 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 
        ACPI_FUNCTION_TRACE(ns_parse_table);
 
-       acpi_ex_enter_interpreter();
-
        /*
         * AML Parse, pass 1
         *
@@ -188,7 +185,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
        status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1,
                                            table_index, start_node);
        if (ACPI_FAILURE(status)) {
-               goto error_exit;
+               return_ACPI_STATUS(status);
        }
 
        /*
@@ -204,10 +201,8 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
        status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2,
                                            table_index, start_node);
        if (ACPI_FAILURE(status)) {
-               goto error_exit;
+               return_ACPI_STATUS(status);
        }
 
-error_exit:
-       acpi_ex_exit_interpreter();
        return_ACPI_STATUS(status);
 }
index 73c76d646064700dc1bb51905b4dbc70b415aabf..290d6f5be44b414b7d89ec0a48be1b234330c254 100644 (file)
@@ -1331,8 +1331,6 @@ static int ec_install_handlers(struct acpi_ec *ec)
 
 static void ec_remove_handlers(struct acpi_ec *ec)
 {
-       acpi_ec_stop(ec, false);
-
        if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
                if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
                                        ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
@@ -1340,6 +1338,19 @@ static void ec_remove_handlers(struct acpi_ec *ec)
                clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
        }
 
+       /*
+        * Stops handling the EC transactions after removing the operation
+        * region handler. This is required because _REG(DISCONNECT)
+        * invoked during the removal can result in new EC transactions.
+        *
+        * Flushes the EC requests and thus disables the GPE before
+        * removing the GPE handler. This is required by the current ACPICA
+        * GPE core. ACPICA GPE core will automatically disable a GPE when
+        * it is indicated but there is no way to handle it. So the drivers
+        * must disable the GPEs prior to removing the GPE handlers.
+        */
+       acpi_ec_stop(ec, false);
+
        if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
                if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
                                        &acpi_ec_gpe_handler)))
index 2215fc847fa90c572b40b426a5edc2a88f5792a2..1f0e06065ae6c5180d2e8deae426aef0966fa3dd 100644 (file)
@@ -928,7 +928,7 @@ static ssize_t format_show(struct device *dev,
 {
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-       return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code));
+       return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
 }
 static DEVICE_ATTR_RO(format);
 
@@ -961,8 +961,8 @@ static ssize_t format1_show(struct device *dev,
                                continue;
                        if (nfit_dcr->dcr->code == dcr->code)
                                continue;
-                       rc = sprintf(buf, "%#x\n",
-                                       be16_to_cpu(nfit_dcr->dcr->code));
+                       rc = sprintf(buf, "0x%04x\n",
+                                       le16_to_cpu(nfit_dcr->dcr->code));
                        break;
                }
                if (rc != ENXIO)
@@ -1151,9 +1151,10 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
                if (disable_vendor_specific)
                        dsm_mask &= ~(1 << 8);
        } else {
-               dev_err(dev, "unknown dimm command family\n");
+               dev_dbg(dev, "unknown dimm command family\n");
                nfit_mem->family = -1;
-               return force_enable_dimms ? 0 : -ENODEV;
+               /* DSMs are optional, continue loading the driver... */
+               return 0;
        }
 
        uuid = to_nfit_uuid(nfit_mem->family);
index 11cb38348aef600cac3215985d98bc7559e37d63..02b9ea1e8d2e6cc6ad978493529aa0ec59f606d8 100644 (file)
@@ -53,12 +53,12 @@ enum nfit_uuids {
 };
 
 /*
- * Region format interface codes are stored as an array of bytes in the
- * NFIT DIMM Control Region structure
+ * Region format interface codes are stored with the interface as the
+ * LSB and the function as the MSB.
  */
-#define NFIT_FIC_BYTE cpu_to_be16(0x101) /* byte-addressable energy backed */
-#define NFIT_FIC_BLK cpu_to_be16(0x201) /* block-addressable non-energy backed */
-#define NFIT_FIC_BYTEN cpu_to_be16(0x301) /* byte-addressable non-energy backed */
+#define NFIT_FIC_BYTE cpu_to_le16(0x101) /* byte-addressable energy backed */
+#define NFIT_FIC_BLK cpu_to_le16(0x201) /* block-addressable non-energy backed */
+#define NFIT_FIC_BYTEN cpu_to_le16(0x301) /* byte-addressable non-energy backed */
 
 enum {
        NFIT_BLK_READ_FLUSH = 1,
index 8fc7323ed3e847197cfdf1e6fdee728cdfdd8e7b..c983bf733ad37d7b608c9410108dcef32cdbf02b 100644 (file)
@@ -470,6 +470,7 @@ static int acpi_irq_pci_sharing_penalty(int irq)
 {
        struct acpi_pci_link *link;
        int penalty = 0;
+       int i;
 
        list_for_each_entry(link, &acpi_link_list, list) {
                /*
@@ -478,18 +479,14 @@ static int acpi_irq_pci_sharing_penalty(int irq)
                 */
                if (link->irq.active && link->irq.active == irq)
                        penalty += PIRQ_PENALTY_PCI_USING;
-               else {
-                       int i;
-
-                       /*
-                        * If a link is inactive, penalize the IRQs it
-                        * might use, but not as severely.
-                        */
-                       for (i = 0; i < link->irq.possible_count; i++)
-                               if (link->irq.possible[i] == irq)
-                                       penalty += PIRQ_PENALTY_PCI_POSSIBLE /
-                                               link->irq.possible_count;
-               }
+
+               /*
+                * Penalize the IRQs this link might use, but not as severely.
+                */
+               for (i = 0; i < link->irq.possible_count; i++)
+                       if (link->irq.possible[i] == irq)
+                               penalty += PIRQ_PENALTY_PCI_POSSIBLE /
+                                       link->irq.possible_count;
        }
 
        return penalty;
@@ -499,9 +496,6 @@ static int acpi_irq_get_penalty(int irq)
 {
        int penalty = 0;
 
-       if (irq < ACPI_MAX_ISA_IRQS)
-               penalty += acpi_isa_irq_penalty[irq];
-
        /*
        * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
        * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
@@ -516,10 +510,49 @@ static int acpi_irq_get_penalty(int irq)
                        penalty += PIRQ_PENALTY_PCI_USING;
        }
 
+       if (irq < ACPI_MAX_ISA_IRQS)
+               return penalty + acpi_isa_irq_penalty[irq];
+
        penalty += acpi_irq_pci_sharing_penalty(irq);
        return penalty;
 }
 
+int __init acpi_irq_penalty_init(void)
+{
+       struct acpi_pci_link *link;
+       int i;
+
+       /*
+        * Update penalties to facilitate IRQ balancing.
+        */
+       list_for_each_entry(link, &acpi_link_list, list) {
+
+               /*
+                * reflect the possible and active irqs in the penalty table --
+                * useful for breaking ties.
+                */
+               if (link->irq.possible_count) {
+                       int penalty =
+                           PIRQ_PENALTY_PCI_POSSIBLE /
+                           link->irq.possible_count;
+
+                       for (i = 0; i < link->irq.possible_count; i++) {
+                               if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS)
+                                       acpi_isa_irq_penalty[link->irq.
+                                                        possible[i]] +=
+                                           penalty;
+                       }
+
+               } else if (link->irq.active &&
+                               (link->irq.active < ACPI_MAX_ISA_IRQS)) {
+                       acpi_isa_irq_penalty[link->irq.active] +=
+                           PIRQ_PENALTY_PCI_POSSIBLE;
+               }
+       }
+
+       return 0;
+}
+
 static int acpi_irq_balance = -1;      /* 0: static, 1: balance */
 
 static int acpi_pci_link_allocate(struct acpi_pci_link *link)
@@ -839,7 +872,7 @@ void acpi_penalize_isa_irq(int irq, int active)
 {
        if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
                acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
-                       active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING;
+                 (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
 }
 
 bool acpi_isa_irq_available(int irq)
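The one-line fix above is a pure operator-precedence repair: '+' binds tighter than '?:', so without parentheses the whole sum became the ternary condition and the penalty was replaced rather than accumulated. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
        int penalty = 100, active = 1;

        /* Parses as (penalty + active) ? 200 : 300, so the existing
         * penalty is discarded whenever the sum is non-zero. */
        int buggy = penalty + active ? 200 : 300;

        /* With parentheses the penalty is actually accumulated. */
        int fixed = penalty + (active ? 200 : 300);

        printf("buggy=%d fixed=%d\n", buggy, fixed); /* buggy=200 fixed=300 */
        return 0;
    }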
index 6e702ab572204deb87d96c0bd399b42d42f2a1f0..1d31c0c0fc20b48c287d2df293776cf9c1e52329 100644 (file)
@@ -137,7 +137,7 @@ static const struct ata_port_info *ahci_seattle_get_port_info(
        u32 val;
 
        plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
-       if (IS_ERR(plat_data))
+       if (!plat_data)
                return &ahci_port_info;
 
        plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
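devm_kzalloc(), like the other allocators, signals failure with NULL rather than an encoded error pointer, so the original IS_ERR() check could never fire. A userspace re-creation of the kernel's err.h convention shows why (MAX_ERRNO and the macros mirror include/linux/err.h):

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static int IS_ERR(const void *ptr)
    {
        /* Error pointers occupy the top page of the address space;
         * NULL (address 0) is far outside that range. */
        return IS_ERR_VALUE((unsigned long)ptr);
    }

    int main(void)
    {
        void *from_alloc = NULL;    /* allocator failure */

        printf("IS_ERR(NULL) = %d\n", IS_ERR(from_alloc)); /* 0: missed */
        printf("!ptr         = %d\n", !from_alloc);        /* 1: caught */
        return 0;
    }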
index 6be7770f68e9a16a2aa8555727358c7cf1df5735..31c183aed368c69c9544c92ecb19853548fb8784 100644 (file)
@@ -4314,6 +4314,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         */
        { "ST380013AS",         "3.20",         ATA_HORKAGE_MAX_SEC_1024 },
 
+       /*
+        * Device times out with higher max sects.
+        * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+        */
+       { "LITEON CX1-JB256-HP", NULL,          ATA_HORKAGE_MAX_SEC_1024 },
+
        /* Devices we expect to fail diagnostics */
 
        /* Devices where NCQ should be avoided */
index bd74ee555278513118d057e832b3ec599923bb69..745489a1c86ab2c11701b124f8998126608c398a 100644 (file)
@@ -986,7 +986,7 @@ static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
                 * Looks like a lot of fuss, but it avoids an unnecessary
                 * +1 usec read-after-write delay for unaffected registers.
                 */
-               laddr = (long)addr & 0xffff;
+               laddr = (unsigned long)addr & 0xffff;
                if (laddr >= 0x300 && laddr <= 0x33c) {
                        laddr &= 0x000f;
                        if (laddr == 0x4 || laddr == 0xc) {
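The cast change above stops converting a pointer to a signed integer, which is implementation-defined. Portable C spells the same intent with uintptr_t, as in this sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int object;
        void *addr = &object;

        /* Convert to an unsigned integer type, then mask: no sign
         * extension, no implementation-defined behaviour. */
        uintptr_t laddr = (uintptr_t)addr & 0xffff;

        printf("low 16 bits: 0x%04lx\n", (unsigned long)laddr);
        return 0;
    }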
index eda09090cb523f5ddc2a6c174502110ae8999a57..f642c4264c277bc05d98dc99eb15ac8091886ba5 100644 (file)
@@ -8,8 +8,6 @@
 #include <linux/bcma/bcma.h>
 #include <linux/delay.h>
 
-#define BCMA_CORE_SIZE         0x1000
-
 #define bcma_err(bus, fmt, ...) \
        pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
 #define bcma_warn(bus, fmt, ...) \
index 2e6d1e9c3345f190b01f9d9caf8d27e6f5f8b9ce..fcc5b4e0aef29ed8d5e863e0277687e1abe700e6 100644 (file)
@@ -207,6 +207,9 @@ struct blkfront_info
        struct blk_mq_tag_set tag_set;
        struct blkfront_ring_info *rinfo;
        unsigned int nr_rings;
+       /* Save incomplete requests and bios for migration. */
+       struct list_head requests;
+       struct bio_list bio_list;
 };
 
 static unsigned int nr_minors;
@@ -2002,69 +2005,22 @@ static int blkif_recover(struct blkfront_info *info)
 {
        unsigned int i, r_index;
        struct request *req, *n;
-       struct blk_shadow *copy;
        int rc;
        struct bio *bio, *cloned_bio;
-       struct bio_list bio_list, merge_bio;
        unsigned int segs, offset;
        int pending, size;
        struct split_bio *split_bio;
-       struct list_head requests;
 
        blkfront_gather_backend_features(info);
        segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blk_queue_max_segments(info->rq, segs);
-       bio_list_init(&bio_list);
-       INIT_LIST_HEAD(&requests);
 
        for (r_index = 0; r_index < info->nr_rings; r_index++) {
-               struct blkfront_ring_info *rinfo;
-
-               rinfo = &info->rinfo[r_index];
-               /* Stage 1: Make a safe copy of the shadow state. */
-               copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
-                              GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
-               if (!copy)
-                       return -ENOMEM;
-
-               /* Stage 2: Set up free list. */
-               memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
-               for (i = 0; i < BLK_RING_SIZE(info); i++)
-                       rinfo->shadow[i].req.u.rw.id = i+1;
-               rinfo->shadow_free = rinfo->ring.req_prod_pvt;
-               rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
+               struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
 
                rc = blkfront_setup_indirect(rinfo);
-               if (rc) {
-                       kfree(copy);
+               if (rc)
                        return rc;
-               }
-
-               for (i = 0; i < BLK_RING_SIZE(info); i++) {
-                       /* Not in use? */
-                       if (!copy[i].request)
-                               continue;
-
-                       /*
-                        * Get the bios in the request so we can re-queue them.
-                        */
-                       if (copy[i].request->cmd_flags &
-                           (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
-                               /*
-                                * Flush operations don't contain bios, so
-                                * we need to requeue the whole request
-                                */
-                               list_add(&copy[i].request->queuelist, &requests);
-                               continue;
-                       }
-                       merge_bio.head = copy[i].request->bio;
-                       merge_bio.tail = copy[i].request->biotail;
-                       bio_list_merge(&bio_list, &merge_bio);
-                       copy[i].request->bio = NULL;
-                       blk_end_request_all(copy[i].request, 0);
-               }
-
-               kfree(copy);
        }
        xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
@@ -2079,7 +2035,7 @@ static int blkif_recover(struct blkfront_info *info)
                kick_pending_request_queues(rinfo);
        }
 
-       list_for_each_entry_safe(req, n, &requests, queuelist) {
+       list_for_each_entry_safe(req, n, &info->requests, queuelist) {
                /* Requeue pending requests (flush or discard) */
                list_del_init(&req->queuelist);
                BUG_ON(req->nr_phys_segments > segs);
@@ -2087,7 +2043,7 @@ static int blkif_recover(struct blkfront_info *info)
        }
        blk_mq_kick_requeue_list(info->rq);
 
-       while ((bio = bio_list_pop(&bio_list)) != NULL) {
+       while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
                /* Traverse the list of pending bios and re-queue them */
                if (bio_segments(bio) > segs) {
                        /*
@@ -2133,9 +2089,42 @@ static int blkfront_resume(struct xenbus_device *dev)
 {
        struct blkfront_info *info = dev_get_drvdata(&dev->dev);
        int err = 0;
+       unsigned int i, j;
 
        dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
 
+       bio_list_init(&info->bio_list);
+       INIT_LIST_HEAD(&info->requests);
+       for (i = 0; i < info->nr_rings; i++) {
+               struct blkfront_ring_info *rinfo = &info->rinfo[i];
+               struct bio_list merge_bio;
+               struct blk_shadow *shadow = rinfo->shadow;
+
+               for (j = 0; j < BLK_RING_SIZE(info); j++) {
+                       /* Not in use? */
+                       if (!shadow[j].request)
+                               continue;
+
+                       /*
+                        * Get the bios in the request so we can re-queue them.
+                        */
+                       if (shadow[j].request->cmd_flags &
+                                       (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+                               /*
+                                * Flush operations don't contain bios, so
+                                * we need to requeue the whole request
+                                */
+                               list_add(&shadow[j].request->queuelist, &info->requests);
+                               continue;
+                       }
+                       merge_bio.head = shadow[j].request->bio;
+                       merge_bio.tail = shadow[j].request->biotail;
+                       bio_list_merge(&info->bio_list, &merge_bio);
+                       shadow[j].request->bio = NULL;
+                       blk_mq_end_request(shadow[j].request, 0);
+               }
+       }
+
        blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
        err = negotiate_mq(info);
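bio_list_merge(), used in the loop above to collect each request's bio chain, is a constant-time splice of one head/tail list onto another. A simplified self-contained sketch of the same structure (the kernel's bio_list has the same shape in outline):

    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next; int id; };
    struct list { struct node *head, *tail; };

    /* Splice src onto the end of dst in O(1). */
    static void list_merge(struct list *dst, struct list *src)
    {
        if (!src->head)
            return;                     /* nothing to splice */
        if (dst->tail)
            dst->tail->next = src->head;
        else
            dst->head = src->head;
        dst->tail = src->tail;
    }

    int main(void)
    {
        struct node a = { NULL, 1 }, b = { NULL, 2 }, c = { NULL, 3 };
        struct list pending, extra;

        a.next = &b;
        pending = (struct list){ &a, &b };  /* [1, 2] */
        extra   = (struct list){ &c, &c };  /* [3]    */

        list_merge(&pending, &extra);       /* [1, 2, 3] */
        for (struct node *n = pending.head; n; n = n->next)
            printf("%d\n", n->id);
        return 0;
    }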
index 10f846cc8db172c5491ddc2508f7084ac5ef5e71..25d5906640c365fe48c618361cb23c98e6a69897 100644 (file)
@@ -99,7 +99,7 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
        struct clk_programmable *prog = to_clk_programmable(hw);
        const struct clk_programmable_layout *layout = prog->layout;
        unsigned int mask = layout->css_mask;
-       unsigned int pckr = 0;
+       unsigned int pckr = index;
 
        if (layout->have_slck_mck)
                mask |= AT91_PMC_CSSMCK_MCK;
index efba7d4dbcfc4134a88dff1efa5309f0551a8ea6..79bcb2e4206048c74add49b88f9efe97d11622ff 100644 (file)
@@ -144,9 +144,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        regmap = syscon_node_to_regmap(of_get_parent(np));
-       if (!regmap) {
+       if (IS_ERR(regmap)) {
                dev_err(&pdev->dev, "failed to have parent regmap\n");
-               return -EINVAL;
+               return PTR_ERR(regmap);
        }
 
        for (i = 0; i < ARRAY_SIZE(clk_oxnas_init); i++) {
index 4bb130cd006275f5c70e27bcda7a746a00c76d34..05b3d73bfefaacdd54c6123609165d6d2d8481d8 100644 (file)
@@ -321,9 +321,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
        }
 
        cclk = clk_register(NULL, &cpuclk->hw);
-       if (IS_ERR(clk)) {
+       if (IS_ERR(cclk)) {
                pr_err("%s: could not register cpuclk %s\n", __func__,  name);
-               ret = PTR_ERR(clk);
+               ret = PTR_ERR(cclk);
                goto free_rate_table;
        }
 
index bc856f21f6b20d35e2c421c51a54e1718274302e..077fcdc7908bb9f3791fe20bc60a0266327ca050 100644 (file)
@@ -41,8 +41,6 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
 #define ROCKCHIP_MMC_DEGREE_MASK 0x3
 #define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
 #define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
-#define ROCKCHIP_MMC_INIT_STATE_RESET 0x1
-#define ROCKCHIP_MMC_INIT_STATE_SHIFT 1
 
 #define PSECS_PER_SEC 1000000000000LL
 
@@ -154,6 +152,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
                return ERR_PTR(-ENOMEM);
 
        init.name = name;
+       init.flags = 0;
        init.num_parents = num_parents;
        init.parent_names = parent_names;
        init.ops = &rockchip_mmc_clk_ops;
@@ -162,15 +161,6 @@ struct clk *rockchip_clk_register_mmc(const char *name,
        mmc_clock->reg = reg;
        mmc_clock->shift = shift;
 
-       /*
-        * Assert init_state to soft reset the CLKGEN
-        * for mmc tuning phase and degree
-        */
-       if (mmc_clock->shift == ROCKCHIP_MMC_INIT_STATE_SHIFT)
-               writel(HIWORD_UPDATE(ROCKCHIP_MMC_INIT_STATE_RESET,
-                                    ROCKCHIP_MMC_INIT_STATE_RESET,
-                                    mmc_clock->shift), mmc_clock->reg);
-
        clk = clk_register(NULL, &mmc_clock->hw);
        if (IS_ERR(clk))
                kfree(mmc_clock);
index 291543f52caad63630b45cf3500d94e2bf4f9733..8059a8d3ea36430e359e4857b33b58a5f4952aec 100644 (file)
@@ -832,9 +832,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
                        RK3399_CLKGATE_CON(13), 1, GFLAGS),
 
        /* perihp */
-       GATE(0, "cpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
+       GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
                        RK3399_CLKGATE_CON(5), 0, GFLAGS),
-       GATE(0, "gpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
+       GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
                        RK3399_CLKGATE_CON(5), 1, GFLAGS),
        COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
                        RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
@@ -1466,6 +1466,8 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = {
 
 static const char *const rk3399_cru_critical_clocks[] __initconst = {
        "aclk_cci_pre",
+       "aclk_gic",
+       "aclk_gic_noc",
        "pclk_perilp0",
        "pclk_perilp0",
        "hclk_perilp0",
@@ -1508,6 +1510,7 @@ static void __init rk3399_clk_init(struct device_node *np)
        ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
        if (IS_ERR(ctx)) {
                pr_err("%s: rockchip clk init failed\n", __func__);
+               iounmap(reg_base);
                return;
        }
 
@@ -1553,6 +1556,7 @@ static void __init rk3399_pmu_clk_init(struct device_node *np)
        ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
        if (IS_ERR(ctx)) {
                pr_err("%s: rockchip pmu clk init failed\n", __func__);
+               iounmap(reg_base);
                return;
        }
 
index 445a7498d6df8f590ef14f447e39dc3e72392a77..9780fac6d029b9b1b85a03ce3d489b31be598281 100644 (file)
@@ -33,6 +33,8 @@ struct sun4i_a10_display_clk_data {
 
        u8      width_div;
        u8      width_mux;
+
+       u32     flags;
 };
 
 struct reset_data {
@@ -166,7 +168,7 @@ static void __init sun4i_a10_display_init(struct device_node *node,
                                     data->has_div ? &div->hw : NULL,
                                     data->has_div ? &clk_divider_ops : NULL,
                                     &gate->hw, &clk_gate_ops,
-                                    0);
+                                    data->flags);
        if (IS_ERR(clk)) {
                pr_err("%s: Couldn't register the clock\n", clk_name);
                goto free_div;
@@ -232,6 +234,7 @@ static const struct sun4i_a10_display_clk_data sun4i_a10_tcon_ch0_data __initcon
        .offset_rst     = 29,
        .offset_mux     = 24,
        .width_mux      = 2,
+       .flags          = CLK_SET_RATE_PARENT,
 };
 
 static void __init sun4i_a10_tcon_ch0_setup(struct device_node *node)
index 98a4582de56a27c370848b240cb044345e145dc3..b6d29d1bedcaead534320d5a2cad836005821aac 100644 (file)
@@ -79,15 +79,11 @@ static int tcon_ch1_is_enabled(struct clk_hw *hw)
 static u8 tcon_ch1_get_parent(struct clk_hw *hw)
 {
        struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
-       int num_parents = clk_hw_get_num_parents(hw);
        u32 reg;
 
        reg = readl(tclk->reg) >> TCON_CH1_SCLK2_MUX_SHIFT;
        reg &= reg >> TCON_CH1_SCLK2_MUX_MASK;
 
-       if (reg >= num_parents)
-               return -EINVAL;
-
        return reg;
 }
 
index 15d06fcf0b500c323e938c08de5947e394121cb9..b02f9c606e0baa59023629b3c1cba8c7ebfbc686 100644 (file)
@@ -56,11 +56,21 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
 /* proc_event_counts is used as the sequence number of the netlink message */
 static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
 
-static inline void get_seq(__u32 *ts, int *cpu)
+static inline void send_msg(struct cn_msg *msg)
 {
        preempt_disable();
-       *ts = __this_cpu_inc_return(proc_event_counts) - 1;
-       *cpu = smp_processor_id();
+
+       msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
+       ((struct proc_event *)msg->data)->cpu = smp_processor_id();
+
+       /*
+        * Preemption remains disabled during send to ensure the messages are
+        * ordered according to their sequence numbers.
+        *
+        * If cn_netlink_send() fails, the data is not sent.
+        */
+       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
+
        preempt_enable();
 }
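Moving the transmit inside the preempt-disabled region makes the sequence-number assignment and the send one indivisible step, so two events can never reach the receiver with their sequence numbers swapped. A userspace analogue of the same pattern, with a mutex standing in for disabled preemption (compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int seq;

    /* Assign the sequence number and transmit under one lock, so
     * emitted seq values are strictly increasing in output order. */
    static void send_msg(const char *payload)
    {
        pthread_mutex_lock(&lock);
        printf("seq=%u payload=%s\n", seq++, payload);
        pthread_mutex_unlock(&lock);
    }

    static void *worker(void *arg)
    {
        for (int i = 0; i < 3; i++)
            send_msg((const char *)arg);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        pthread_create(&t1, NULL, worker, "a");
        pthread_create(&t2, NULL, worker, "b");
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
    }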
 
@@ -77,7 +87,6 @@ void proc_fork_connector(struct task_struct *task)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_FORK;
        rcu_read_lock();
@@ -92,8 +101,7 @@ void proc_fork_connector(struct task_struct *task)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       /*  If cn_netlink_send() failed, the data is not sent */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_exec_connector(struct task_struct *task)
@@ -108,7 +116,6 @@ void proc_exec_connector(struct task_struct *task)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_EXEC;
        ev->event_data.exec.process_pid = task->pid;
@@ -118,7 +125,7 @@ void proc_exec_connector(struct task_struct *task)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_id_connector(struct task_struct *task, int which_id)
@@ -150,14 +157,13 @@ void proc_id_connector(struct task_struct *task, int which_id)
                return;
        }
        rcu_read_unlock();
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
 
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_sid_connector(struct task_struct *task)
@@ -172,7 +178,6 @@ void proc_sid_connector(struct task_struct *task)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_SID;
        ev->event_data.sid.process_pid = task->pid;
@@ -182,7 +187,7 @@ void proc_sid_connector(struct task_struct *task)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
@@ -197,7 +202,6 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_PTRACE;
        ev->event_data.ptrace.process_pid  = task->pid;
@@ -215,7 +219,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_comm_connector(struct task_struct *task)
@@ -230,7 +234,6 @@ void proc_comm_connector(struct task_struct *task)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_COMM;
        ev->event_data.comm.process_pid  = task->pid;
@@ -241,7 +244,7 @@ void proc_comm_connector(struct task_struct *task)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_coredump_connector(struct task_struct *task)
@@ -256,7 +259,6 @@ void proc_coredump_connector(struct task_struct *task)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_COREDUMP;
        ev->event_data.coredump.process_pid = task->pid;
@@ -266,7 +268,7 @@ void proc_coredump_connector(struct task_struct *task)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_exit_connector(struct task_struct *task)
@@ -281,7 +283,6 @@ void proc_exit_connector(struct task_struct *task)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_EXIT;
        ev->event_data.exit.process_pid = task->pid;
@@ -293,7 +294,7 @@ void proc_exit_connector(struct task_struct *task)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 /*
@@ -325,7 +326,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
        msg->ack = rcvd_ack + 1;
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 /**
index 3646b143bbf5342f0ac8e0a1b90a86efc7c8cef5..0bb44d5b5df49d385b5bcfd43297b750417461a7 100644 (file)
@@ -79,15 +79,16 @@ static const struct of_device_id machines[] __initconst = {
 static int __init cpufreq_dt_platdev_init(void)
 {
        struct device_node *np = of_find_node_by_path("/");
+       const struct of_device_id *match;
 
        if (!np)
                return -ENODEV;
 
-       if (!of_match_node(machines, np))
+       match = of_match_node(machines, np);
+       of_node_put(np);
+       if (!match)
                return -ENODEV;
 
-       of_node_put(of_root);
-
        return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1,
                                                               NULL, 0));
 }
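The rework fixes two reference-count problems at once: the put now targets the node that was actually acquired (np rather than of_root), and it happens on every path, including the early -ENODEV return that previously leaked the reference. The general pairing, sketched with a toy refcount:

    #include <stdio.h>

    struct node { int refcount; };

    static struct node *node_get(struct node *n) { n->refcount++; return n; }
    static void node_put(struct node *n)         { n->refcount--; }

    static int probe(struct node *root, int match)
    {
        struct node *np = node_get(root);   /* like of_find_node_by_path() */

        node_put(np);       /* drop the ref before acting on the result, */
        if (!match)         /* so both exits below stay balanced         */
            return -1;
        return 0;
    }

    int main(void)
    {
        struct node root = { 1 };

        probe(&root, 0);
        probe(&root, 1);
        printf("refcount after probes: %d (balanced)\n", root.refcount);
        return 0;
    }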
index 9009295f5134247374cacf2a55717e07ff61a057..5617c7087d775ff5f4f93cc084b6bc8769be77ec 100644 (file)
@@ -2261,6 +2261,10 @@ int cpufreq_update_policy(unsigned int cpu)
         * -> ask driver for current freq and notify governors about a change
         */
        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
+               if (cpufreq_suspended) {
+                       ret = -EAGAIN;
+                       goto unlock;
+               }
                new_policy.cur = cpufreq_update_current_freq(policy);
                if (WARN_ON(!new_policy.cur)) {
                        ret = -EIO;
index fe9dc17ea8733b6c691502c74f9a10ad88f97c88..1fa1a32928d70a30050b30ca2d6eef4aea62d8e3 100644 (file)
@@ -1400,6 +1400,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
 {
        struct cpudata *cpu = all_cpu_data[cpu_num];
 
+       if (cpu->update_util_set)
+               return;
+
        /* Prevent intel_pstate_update_util() from using stale data. */
        cpu->sample.time = 0;
        cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
@@ -1440,8 +1443,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
 
-       intel_pstate_clear_update_util_hook(policy->cpu);
-
        pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
                 policy->cpuinfo.max_freq, policy->max);
 
index a4d0059e232cbd22478d29d92370eb027e8bde79..c73207abb5a44a77ed0819101ab75cd626ceb353 100644 (file)
@@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 
        struct cpuidle_state *target_state = &drv->states[index];
        bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
-       u64 time_start, time_end;
+       ktime_t time_start, time_end;
        s64 diff;
 
        /*
@@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        sched_idle_set_state(target_state);
 
        trace_cpu_idle_rcuidle(index, dev->cpu);
-       time_start = local_clock();
+       time_start = ns_to_ktime(local_clock());
 
        stop_critical_timings();
        entered_state = target_state->enter(dev, drv, index);
        start_critical_timings();
 
-       time_end = local_clock();
+       time_end = ns_to_ktime(local_clock());
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
        /* The cpu is no longer idle or about to enter idle. */
@@ -217,11 +217,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        if (!cpuidle_state_is_coupled(drv, index))
                local_irq_enable();
 
-       /*
-        * local_clock() returns the time in nanosecond, let's shift
-        * by 10 (divide by 1024) to have microsecond based time.
-        */
-       diff = (time_end - time_start) >> 10;
+       diff = ktime_us_delta(time_end, time_start);
        if (diff > INT_MAX)
                diff = INT_MAX;
 
index 6d74b91f2152807a0f65470dd799aa5080a3c111..5fc3dbb9ada0240011f58fa072e3015fb69bea7e 100644 (file)
@@ -2,6 +2,7 @@ $(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
                             $(obj)/qat_rsapubkey-asn1.h
 $(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
                              $(obj)/qat_rsaprivkey-asn1.h
+$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h
 
 clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
 clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
index 574e87c7f2b8933c04242de63afce3ac65723cf3..9acccad26928a4d60cb4fdcbde3feab45a11c3ad 100644 (file)
@@ -781,7 +781,7 @@ static int hash_process_data(struct hash_device_data *device_data,
                                                &device_data->state);
                                memmove(req_ctx->state.buffer,
                                        device_data->state.buffer,
-                                       HASH_BLOCK_SIZE / sizeof(u32));
+                                       HASH_BLOCK_SIZE);
                                if (ret) {
                                        dev_err(device_data->dev,
                                                "%s: hash_resume_state() failed!\n",
@@ -832,7 +832,7 @@ static int hash_process_data(struct hash_device_data *device_data,
 
                        memmove(device_data->state.buffer,
                                req_ctx->state.buffer,
-                               HASH_BLOCK_SIZE / sizeof(u32));
+                               HASH_BLOCK_SIZE);
                        if (ret) {
                                dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
                                        __func__);
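HASH_BLOCK_SIZE is a byte count, so the extra division by sizeof(u32) made both memmove() calls copy only a quarter of the state buffer. The pitfall in isolation:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BLOCK_SIZE 64   /* bytes, like HASH_BLOCK_SIZE */

    int main(void)
    {
        uint32_t src[BLOCK_SIZE / sizeof(uint32_t)];
        uint32_t dst[BLOCK_SIZE / sizeof(uint32_t)] = { 0 };

        memset(src, 0xab, sizeof(src));

        /* Buggy: BLOCK_SIZE is already in bytes, so dividing by
         * sizeof(u32) copies only 16 of the 64 bytes. */
        memmove(dst, src, BLOCK_SIZE / sizeof(uint32_t));
        printf("byte 16, buggy copy: 0x%02x\n", ((uint8_t *)dst)[16]);

        /* Fixed: pass the byte count unchanged. */
        memmove(dst, src, BLOCK_SIZE);
        printf("byte 16, fixed copy: 0x%02x\n", ((uint8_t *)dst)[16]);
        return 0;
    }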
index 495577b6d31b33d72792bb7f0f14b3d0a2c66b7e..94ad5c0adbcbd3002e4813d70b100459cfa1ec57 100644 (file)
@@ -182,7 +182,7 @@ struct crypto_alg p8_aes_cbc_alg = {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "p8_aes_cbc",
        .cra_module = THIS_MODULE,
-       .cra_priority = 1000,
+       .cra_priority = 2000,
        .cra_type = &crypto_blkcipher_type,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
        .cra_alignmask = 0,
index 0a3c1b04cf3c6057fe667ec95e10588906613935..38ed10d761d006eb2f9e24e1c318f98a09a4a323 100644 (file)
@@ -166,7 +166,7 @@ struct crypto_alg p8_aes_ctr_alg = {
        .cra_name = "ctr(aes)",
        .cra_driver_name = "p8_aes_ctr",
        .cra_module = THIS_MODULE,
-       .cra_priority = 1000,
+       .cra_priority = 2000,
        .cra_type = &crypto_blkcipher_type,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
        .cra_alignmask = 0,
index 9f4994cabcc7f44af299a8c8d3fbd90c41982fa4..b18e67d0e065d897bd34d4e2b255056293a17550 100644 (file)
@@ -141,7 +141,7 @@ my $vmr = sub {
 
 # Some ABIs specify vrsave, special-purpose register #256, as reserved
 # for system use.
-my $no_vrsave = ($flavour =~ /aix|linux64le/);
+my $no_vrsave = ($flavour =~ /linux-ppc64le/);
 my $mtspr = sub {
     my ($f,$idx,$ra) = @_;
     if ($idx == 256 && $no_vrsave) {
index 20ce0687b111642bea425d171af06b428e619b9f..ddaee60ae52a5595063c4fac1389eef10ffcb3c0 100644 (file)
@@ -334,6 +334,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        struct reservation_object *resv = exp_info->resv;
        struct file *file;
        size_t alloc_size = sizeof(struct dma_buf);
+       int ret;
 
        if (!exp_info->resv)
                alloc_size += sizeof(struct reservation_object);
@@ -357,8 +358,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 
        dmabuf = kzalloc(alloc_size, GFP_KERNEL);
        if (!dmabuf) {
-               module_put(exp_info->owner);
-               return ERR_PTR(-ENOMEM);
+               ret = -ENOMEM;
+               goto err_module;
        }
 
        dmabuf->priv = exp_info->priv;
@@ -379,8 +380,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
                                        exp_info->flags);
        if (IS_ERR(file)) {
-               kfree(dmabuf);
-               return ERR_CAST(file);
+               ret = PTR_ERR(file);
+               goto err_dmabuf;
        }
 
        file->f_mode |= FMODE_LSEEK;
@@ -394,6 +395,12 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        mutex_unlock(&db_list.lock);
 
        return dmabuf;
+
+err_dmabuf:
+       kfree(dmabuf);
+err_module:
+       module_put(exp_info->owner);
+       return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(dma_buf_export);
 
index 6744d88bdea89782c524ede7d843c3d6bcd67ee2..4fb2eb7c800d8839c6329cd34589eeea4dbaa5c0 100644 (file)
@@ -2378,22 +2378,19 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
  * @num_mc: pointer to the memory controllers count, to be incremented in case
  *         of success.
  * @table: model specific table
- * @allow_dups: allow for multiple devices to exist with the same device id
- *              (as implemented, this isn't expected to work correctly in the
- *              multi-socket case).
- * @multi_bus: don't assume devices on different buses belong to different
- *             memory controllers.
  *
  * returns 0 on success or a negative error code
  */
-static int sbridge_get_all_devices_full(u8 *num_mc,
-                                       const struct pci_id_table *table,
-                                       int allow_dups,
-                                       int multi_bus)
+static int sbridge_get_all_devices(u8 *num_mc,
+                                       const struct pci_id_table *table)
 {
        int i, rc;
        struct pci_dev *pdev = NULL;
+       int allow_dups = 0;
+       int multi_bus = 0;
 
+       if (table->type == KNIGHTS_LANDING)
+               allow_dups = multi_bus = 1;
        while (table && table->descr) {
                for (i = 0; i < table->n_devs; i++) {
                        if (!allow_dups || i == 0 ||
@@ -2420,11 +2417,6 @@ static int sbridge_get_all_devices_full(u8 *num_mc,
        return 0;
 }
 
-#define sbridge_get_all_devices(num_mc, table) \
-               sbridge_get_all_devices_full(num_mc, table, 0, 0)
-#define sbridge_get_all_devices_knl(num_mc, table) \
-               sbridge_get_all_devices_full(num_mc, table, 1, 1)
-
 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
                                 struct sbridge_dev *sbridge_dev)
 {
index cebcb405812ec0c8a98f771b94f8a4c5caddabc5..d7860614f87f532caeb21145c78352d38161b2a3 100644 (file)
@@ -49,7 +49,7 @@ config GPIO_DEVRES
 
 config OF_GPIO
        def_bool y
-       depends on OF || COMPILE_TEST
+       depends on OF
 
 config GPIO_ACPI
        def_bool y
@@ -402,9 +402,12 @@ config GPIO_TB10X
        select OF_GPIO
 
 config GPIO_TEGRA
-       bool
-       default y
+       bool "NVIDIA Tegra GPIO support"
+       default ARCH_TEGRA
        depends on ARCH_TEGRA || COMPILE_TEST
+       depends on OF
+       help
+         Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
 
 config GPIO_TS4800
        tristate "TS-4800 DIO blocks and compatibles"
index e85e7539cf5d23d0eae1d4a1433b236253dd10cc..eb43ae4835c15d8a65ac8f4fde2de07c7b6e19ee 100644 (file)
@@ -61,9 +61,8 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
        return gpio % 8;
 }
 
-static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
+static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
 {
-       struct sch_gpio *sch = gpiochip_get_data(gc);
        unsigned short offset, bit;
        u8 reg_val;
 
@@ -75,10 +74,9 @@ static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
        return reg_val;
 }
 
-static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
+static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
                             int val)
 {
-       struct sch_gpio *sch = gpiochip_get_data(gc);
        unsigned short offset, bit;
        u8 reg_val;
 
@@ -98,14 +96,15 @@ static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
        struct sch_gpio *sch = gpiochip_get_data(gc);
 
        spin_lock(&sch->lock);
-       sch_gpio_reg_set(gc, gpio_num, GIO, 1);
+       sch_gpio_reg_set(sch, gpio_num, GIO, 1);
        spin_unlock(&sch->lock);
        return 0;
 }
 
 static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
 {
-       return sch_gpio_reg_get(gc, gpio_num, GLV);
+       struct sch_gpio *sch = gpiochip_get_data(gc);
+       return sch_gpio_reg_get(sch, gpio_num, GLV);
 }
 
 static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
@@ -113,7 +112,7 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
        struct sch_gpio *sch = gpiochip_get_data(gc);
 
        spin_lock(&sch->lock);
-       sch_gpio_reg_set(gc, gpio_num, GLV, val);
+       sch_gpio_reg_set(sch, gpio_num, GLV, val);
        spin_unlock(&sch->lock);
 }
 
@@ -123,7 +122,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
        struct sch_gpio *sch = gpiochip_get_data(gc);
 
        spin_lock(&sch->lock);
-       sch_gpio_reg_set(gc, gpio_num, GIO, 0);
+       sch_gpio_reg_set(sch, gpio_num, GIO, 0);
        spin_unlock(&sch->lock);
 
        /*
@@ -182,13 +181,13 @@ static int sch_gpio_probe(struct platform_device *pdev)
                 * GPIO7 is configured by the CMC as SLPIOVR
                 * Enable GPIO[9:8] core powered gpios explicitly
                 */
-               sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
-               sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
+               sch_gpio_reg_set(sch, 8, GEN, 1);
+               sch_gpio_reg_set(sch, 9, GEN, 1);
                /*
                 * SUS_GPIO[2:0] enabled by default
                 * Enable SUS_GPIO3 resume powered gpio explicitly
                 */
-               sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
+               sch_gpio_reg_set(sch, 13, GEN, 1);
                break;
 
        case PCI_DEVICE_ID_INTEL_ITC_LPC:
index ec891a27952f88e33bd9a7d6f6bfe6e5069f995a..661b0e34e0672eedba9e7896c742944aeda18477 100644 (file)
@@ -98,7 +98,6 @@ struct tegra_gpio_info {
        const struct tegra_gpio_soc_config      *soc;
        struct gpio_chip                        gc;
        struct irq_chip                         ic;
-       struct lock_class_key                   lock_class;
        u32                                     bank_count;
 };
 
@@ -547,6 +546,12 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
 };
 
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different category
+ * than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
 static int tegra_gpio_probe(struct platform_device *pdev)
 {
        const struct tegra_gpio_soc_config *config;
@@ -660,7 +665,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
 
                bank = &tgi->bank_info[GPIO_BANK(gpio)];
 
-               irq_set_lockdep_class(irq, &tgi->lock_class);
+               irq_set_lockdep_class(irq, &gpio_lock_class);
                irq_set_chip_data(irq, bank);
                irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
        }
index 3a5c7011ad3b3e832d7cce6e55ed62562bba94be..8b830996fe0212d3ae0153ae5b708a84fa53976e 100644 (file)
@@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
        if (!desc && gpio_is_valid(gpio))
                return -EPROBE_DEFER;
 
+       err = gpiod_request(desc, label);
+       if (err)
+               return err;
+
        if (flags & GPIOF_OPEN_DRAIN)
                set_bit(FLAG_OPEN_DRAIN, &desc->flags);
 
@@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
        if (flags & GPIOF_ACTIVE_LOW)
                set_bit(FLAG_ACTIVE_LOW, &desc->flags);
 
-       err = gpiod_request(desc, label);
-       if (err)
-               return err;
-
        if (flags & GPIOF_DIR_IN)
                err = gpiod_direction_input(desc);
        else
index 570771ed19e6f1f326575fa20cdc621ea2588432..be74bd370f1fc5443586d57076e9c04a80d74e1f 100644 (file)
@@ -1352,14 +1352,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
                spin_lock_irqsave(&gpio_lock, flags);
        }
 done:
-       if (status < 0) {
-               /* Clear flags that might have been set by the caller before
-                * requesting the GPIO.
-                */
-               clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
-               clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
-               clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
-       }
        spin_unlock_irqrestore(&gpio_lock, flags);
        return status;
 }
@@ -2587,28 +2579,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(gpiod_get_optional);
 
-/**
- * gpiod_parse_flags - helper function to parse GPIO lookup flags
- * @desc:      gpio to be setup
- * @lflags:    gpio_lookup_flags - returned from of_find_gpio() or
- *             of_get_gpio_hog()
- *
- * Set the GPIO descriptor flags based on the given GPIO lookup flags.
- */
-static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
-{
-       if (lflags & GPIO_ACTIVE_LOW)
-               set_bit(FLAG_ACTIVE_LOW, &desc->flags);
-       if (lflags & GPIO_OPEN_DRAIN)
-               set_bit(FLAG_OPEN_DRAIN, &desc->flags);
-       if (lflags & GPIO_OPEN_SOURCE)
-               set_bit(FLAG_OPEN_SOURCE, &desc->flags);
-}
 
 /**
  * gpiod_configure_flags - helper function to configure a given GPIO
  * @desc:      gpio whose value will be assigned
  * @con_id:    function within the GPIO consumer
+ * @lflags:    gpio_lookup_flags - returned from of_find_gpio() or
+ *             of_get_gpio_hog()
  * @dflags:    gpiod_flags - optional GPIO initialization flags
  *
  * Return 0 on success, -ENOENT if no GPIO has been assigned to the
@@ -2616,10 +2593,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
  * occurred while trying to acquire the GPIO.
  */
 static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
-                                enum gpiod_flags dflags)
+               unsigned long lflags, enum gpiod_flags dflags)
 {
        int status;
 
+       if (lflags & GPIO_ACTIVE_LOW)
+               set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+       if (lflags & GPIO_OPEN_DRAIN)
+               set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+       if (lflags & GPIO_OPEN_SOURCE)
+               set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
        /* No particular flag request, return here... */
        if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
                pr_debug("no flags found for %s\n", con_id);
@@ -2686,13 +2670,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
                return desc;
        }
 
-       gpiod_parse_flags(desc, lookupflags);
-
        status = gpiod_request(desc, con_id);
        if (status < 0)
                return ERR_PTR(status);
 
-       status = gpiod_configure_flags(desc, con_id, flags);
+       status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
        if (status < 0) {
                dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
                gpiod_put(desc);
@@ -2748,6 +2730,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
        if (IS_ERR(desc))
                return desc;
 
+       ret = gpiod_request(desc, NULL);
+       if (ret)
+               return ERR_PTR(ret);
+
        if (active_low)
                set_bit(FLAG_ACTIVE_LOW, &desc->flags);
 
@@ -2758,10 +2744,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
                        set_bit(FLAG_OPEN_SOURCE, &desc->flags);
        }
 
-       ret = gpiod_request(desc, NULL);
-       if (ret)
-               return ERR_PTR(ret);
-
        return desc;
 }
 EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
@@ -2814,8 +2796,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
        chip = gpiod_to_chip(desc);
        hwnum = gpio_chip_hwgpio(desc);
 
-       gpiod_parse_flags(desc, lflags);
-
        local_desc = gpiochip_request_own_desc(chip, hwnum, name);
        if (IS_ERR(local_desc)) {
                status = PTR_ERR(local_desc);
@@ -2824,7 +2804,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
                return status;
        }
 
-       status = gpiod_configure_flags(desc, name, dflags);
+       status = gpiod_configure_flags(desc, name, lflags, dflags);
        if (status < 0) {
                pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n",
                       name, chip->label, hwnum, status);
index eb09037a7161aa81e9525b55f567ca61f09ab73c..8ebc5f1eb4c0fed15da2f40b0024f74099488c54 100644 (file)
@@ -306,13 +306,16 @@ struct amdgpu_ring_funcs {
                                uint32_t oa_base, uint32_t oa_size);
        /* testing functions */
        int (*test_ring)(struct amdgpu_ring *ring);
-       int (*test_ib)(struct amdgpu_ring *ring);
+       int (*test_ib)(struct amdgpu_ring *ring, long timeout);
        /* insert NOP packets */
        void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
        /* pad the indirect buffer to the necessary number of dw */
        void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
        unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
        void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+       /* note usage for clock and power gating */
+       void (*begin_use)(struct amdgpu_ring *ring);
+       void (*end_use)(struct amdgpu_ring *ring);
 };
 
 /*
@@ -772,7 +775,6 @@ struct amdgpu_ring {
        struct amdgpu_fence_driver      fence_drv;
        struct amd_gpu_scheduler        sched;
 
-       spinlock_t              fence_lock;
        struct amdgpu_bo        *ring_obj;
        volatile uint32_t       *ring;
        unsigned                rptr_offs;
@@ -799,8 +801,8 @@ struct amdgpu_ring {
        enum amdgpu_ring_type   type;
        char                    name[16];
        unsigned                cond_exe_offs;
-       u64                             cond_exe_gpu_addr;
-       volatile u32    *cond_exe_cpu_addr;
+       u64                     cond_exe_gpu_addr;
+       volatile u32            *cond_exe_cpu_addr;
 #if defined(CONFIG_DEBUG_FS)
        struct dentry *ent;
 #endif
@@ -1679,6 +1681,7 @@ struct amdgpu_uvd {
        struct amdgpu_ring      ring;
        struct amdgpu_irq_src   irq;
        bool                    address_64_bit;
+       bool                    use_ctx_buf;
        struct amd_sched_entity entity;
 };
 
@@ -1700,6 +1703,7 @@ struct amdgpu_vce {
        struct drm_file         *filp[AMDGPU_MAX_VCE_HANDLES];
        uint32_t                img_size[AMDGPU_MAX_VCE_HANDLES];
        struct delayed_work     idle_work;
+       struct mutex            idle_mutex;
        const struct firmware   *fw;    /* VCE firmware */
        struct amdgpu_ring      ring[AMDGPU_MAX_VCE_RINGS];
        struct amdgpu_irq_src   irq;
@@ -2242,7 +2246,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
-#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
+#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
index 9df1bcb35bf097ba9971ff8c9f57d5f3f449f037..983175363b0688df6ca181e612000173da9141bf 100644 (file)
@@ -551,28 +551,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
                    le16_to_cpu(firmware_info->info.usReferenceClock);
                ppll->reference_div = 0;
 
-               if (crev < 2)
-                       ppll->pll_out_min =
-                               le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
-               else
-                       ppll->pll_out_min =
-                               le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
+               ppll->pll_out_min =
+                       le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
                ppll->pll_out_max =
                    le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
 
-               if (crev >= 4) {
-                       ppll->lcd_pll_out_min =
-                               le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
-                       if (ppll->lcd_pll_out_min == 0)
-                               ppll->lcd_pll_out_min = ppll->pll_out_min;
-                       ppll->lcd_pll_out_max =
-                               le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
-                       if (ppll->lcd_pll_out_max == 0)
-                               ppll->lcd_pll_out_max = ppll->pll_out_max;
-               } else {
+               ppll->lcd_pll_out_min =
+                       le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+               if (ppll->lcd_pll_out_min == 0)
                        ppll->lcd_pll_out_min = ppll->pll_out_min;
+               ppll->lcd_pll_out_max =
+                       le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+               if (ppll->lcd_pll_out_max == 0)
                        ppll->lcd_pll_out_max = ppll->pll_out_max;
-               }
 
                if (ppll->pll_out_min == 0)
                        ppll->pll_out_min = 64800;
index 0494fe7b62c0163abded8b316e5bc2e7d7e77d51..49de92600074dd3f16f117553ae44368b6b1d0d8 100644 (file)
@@ -539,7 +539,6 @@ static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
 static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
        .switchto = amdgpu_atpx_switchto,
        .power_state = amdgpu_atpx_power_state,
-       .init = amdgpu_atpx_init,
        .get_client_id = amdgpu_atpx_get_client_id,
 };
 
@@ -574,6 +573,7 @@ static bool amdgpu_atpx_detect(void)
                printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
                       acpi_method_name);
                amdgpu_atpx_priv.atpx_detected = true;
+               amdgpu_atpx_init();
                return true;
        }
        return false;
index 99ca75baa47d7b8e018a856936fffd016d808742..2b6afe123f3d90acce64b35325da1f1e9dbd1fa6 100644 (file)
@@ -352,22 +352,22 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
        uint16_t tmp, bios_header_start;
 
        r = amdgpu_atrm_get_bios(adev);
-       if (r == false)
+       if (!r)
                r = amdgpu_acpi_vfct_bios(adev);
-       if (r == false)
+       if (!r)
                r = igp_read_bios_from_vram(adev);
-       if (r == false)
+       if (!r)
                r = amdgpu_read_bios(adev);
-       if (r == false) {
+       if (!r) {
                r = amdgpu_read_bios_from_rom(adev);
        }
-       if (r == false) {
+       if (!r) {
                r = amdgpu_read_disabled_bios(adev);
        }
-       if (r == false) {
+       if (!r) {
                r = amdgpu_read_platform_bios(adev);
        }
-       if (r == false || adev->bios == NULL) {
+       if (!r || adev->bios == NULL) {
                DRM_ERROR("Unable to locate a BIOS ROM\n");
                adev->bios = NULL;
                return false;
index 5556ce97919921254a924aa71b5bdeba128b8b09..bc0440f7a31d3be5018c6eacb01c5a719dcc8db6 100644 (file)
@@ -752,6 +752,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 
                if (!adev->pm.fw) {
                        switch (adev->asic_type) {
+                       case CHIP_TOPAZ:
+                               strcpy(fw_name, "amdgpu/topaz_smc.bin");
+                               break;
                        case CHIP_TONGA:
                                strcpy(fw_name, "amdgpu/tonga_smc.bin");
                                break;
@@ -800,13 +803,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 
                info->version = adev->pm.fw_version;
                info->image_size = ucode_size;
+               info->ucode_start_address = ucode_start_address;
                info->kptr = (void *)src;
        }
        return 0;
 }
 
 static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
-                               struct cgs_system_info *sys_info)
+                                       struct cgs_system_info *sys_info)
 {
        CGS_FUNC_ADEV;
 
@@ -826,6 +830,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
        case CGS_SYSTEM_INFO_PCIE_MLW:
                sys_info->value = adev->pm.pcie_mlw_mask;
                break;
+       case CGS_SYSTEM_INFO_PCIE_DEV:
+               sys_info->value = adev->pdev->device;
+               break;
+       case CGS_SYSTEM_INFO_PCIE_REV:
+               sys_info->value = adev->pdev->revision;
+               break;
        case CGS_SYSTEM_INFO_CG_FLAGS:
                sys_info->value = adev->cg_flags;
                break;
@@ -911,14 +921,12 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
        acpi_handle handle;
        struct acpi_object_list input;
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *params = NULL;
-       union acpi_object *obj = NULL;
+       union acpi_object *params, *obj;
        uint8_t name[5] = {'\0'};
-       struct cgs_acpi_method_argument *argument = NULL;
+       struct cgs_acpi_method_argument *argument;
        uint32_t i, count;
        acpi_status status;
-       int result = 0;
-       uint32_t func_no = 0xFFFFFFFF;
+       int result;
 
        handle = ACPI_HANDLE(&adev->pdev->dev);
        if (!handle)
@@ -935,7 +943,6 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
                if (info->pinput_argument == NULL)
                        return -EINVAL;
                argument = info->pinput_argument;
-               func_no = argument->value;
                for (i = 0; i < info->input_count; i++) {
                        if (((argument->type == ACPI_TYPE_STRING) ||
                             (argument->type == ACPI_TYPE_BUFFER)) &&
@@ -1004,7 +1011,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
 
        if (ACPI_FAILURE(status)) {
                result = -EIO;
-               goto error;
+               goto free_input;
        }
 
        /* return the output info */
@@ -1014,7 +1021,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
                if ((obj->type != ACPI_TYPE_PACKAGE) ||
                        (obj->package.count != count)) {
                        result = -EIO;
-                       goto error;
+                       goto free_obj;
                }
                params = obj->package.elements;
        } else
@@ -1022,13 +1029,13 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
 
        if (params == NULL) {
                result = -EIO;
-               goto error;
+               goto free_obj;
        }
 
        for (i = 0; i < count; i++) {
                if (argument->type != params->type) {
                        result = -EIO;
-                       goto error;
+                       goto free_obj;
                }
                switch (params->type) {
                case ACPI_TYPE_INTEGER:
@@ -1038,7 +1045,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
                        if ((params->string.length != argument->data_length) ||
                                (params->string.pointer == NULL)) {
                                result = -EIO;
-                               goto error;
+                               goto free_obj;
                        }
                        strncpy(argument->pointer,
                                params->string.pointer,
@@ -1047,7 +1054,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
                case ACPI_TYPE_BUFFER:
                        if (params->buffer.pointer == NULL) {
                                result = -EIO;
-                               goto error;
+                               goto free_obj;
                        }
                        memcpy(argument->pointer,
                                params->buffer.pointer,
@@ -1060,9 +1067,10 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
                params++;
        }
 
-error:
-       if (obj != NULL)
-               kfree(obj);
+       result = 0;
+free_obj:
+       kfree(obj);
+free_input:
        kfree((void *)input.pointer);
        return result;
 }
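
The error-path rework above replaces a single catch-all "error" label with one label per resource, the usual kernel labeled-cleanup idiom; each failure site jumps to the label that frees exactly what has been allocated so far, which is why the NULL initializations can be dropped. A minimal standalone sketch of the idiom, with plain malloc/free standing in for the kernel allocators (all names illustrative):

#include <stdlib.h>

static int eval_sketch(int fail_parse)
{
        int result;
        char *input, *obj;

        input = malloc(64);
        if (!input)
                return -1;

        obj = malloc(128);
        if (!obj) {
                result = -1;
                goto free_input;        /* only "input" exists so far */
        }

        if (fail_parse) {
                result = -1;
                goto free_obj;          /* both allocations exist */
        }

        result = 0;                     /* success falls through the frees */
free_obj:
        free(obj);
free_input:
        free(input);
        return result;
}

int main(void)
{
        return eval_sketch(0);
}
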
@@ -1074,7 +1082,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
 }
 #endif
 
-int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
+static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
                                        uint32_t acpi_method,
                                        uint32_t acpi_function,
                                        void *pinput, void *poutput,
index 614fb026436d347f62cd6debe35f3152ebd76df8..df7ab2458e5016d0da9da40b9658deb80f72d4dd 100644 (file)
@@ -1911,7 +1911,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        }
 
        drm_kms_helper_poll_enable(dev);
+
+       /*
+        * Most of the connector probing functions try to acquire runtime pm
+        * refs to ensure that the GPU is powered on when connector polling is
+        * performed. Since we're calling this from a runtime PM callback,
+        * trying to acquire rpm refs will cause us to deadlock.
+        *
+        * Since we're guaranteed to be holding the rpm lock, it's safe to
+        * temporarily disable the rpm helpers so this doesn't deadlock us.
+        */
+#ifdef CONFIG_PM
+       dev->dev->power.disable_depth++;
+#endif
        drm_helper_hpd_irq_event(dev);
+#ifdef CONFIG_PM
+       dev->dev->power.disable_depth--;
+#endif
 
        if (fbcon) {
                amdgpu_fbdev_set_suspend(adev, 0);
index 7dbe8d02c5a637d42f9ea345b3cfc087966555c6..76f96028313dcacececda5678977822f43c0e2c4 100644 (file)
@@ -122,7 +122,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                usleep_range(min_udelay, 2 * min_udelay);
                spin_lock_irqsave(&crtc->dev->event_lock, flags);
-       };
+       }
 
        if (!repcnt)
                DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
@@ -516,9 +516,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
 
-       if (amdgpu_fb->obj) {
-               drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
-       }
+       drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(amdgpu_fb);
 }
index 015f1f4aae53263a66b008aeb49b43c0fdc2ce7c..9aa533cf4ad10f79535172eaa5edafb097da0154 100644 (file)
  * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
  * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
  *           at the end of IBs.
+ * - 3.3.0 - Add VM support for UVD on supported hardware.
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       2
+#define KMS_DRIVER_MINOR       3
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit = 0;
index d1558768cfb73d57e30618b971a5edb5d1f1d54f..0b109aebfec6ddbd003aac9d782d4c9a2aede8df 100644 (file)
@@ -204,16 +204,25 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
        if (seq != ring->fence_drv.sync_seq)
                amdgpu_fence_schedule_fallback(ring);
 
-       while (last_seq != seq) {
+       if (unlikely(seq == last_seq))
+               return;
+
+       last_seq &= drv->num_fences_mask;
+       seq &= drv->num_fences_mask;
+
+       do {
                struct fence *fence, **ptr;
 
-               ptr = &drv->fences[++last_seq & drv->num_fences_mask];
+               ++last_seq;
+               last_seq &= drv->num_fences_mask;
+               ptr = &drv->fences[last_seq];
 
                /* There is always exactly one thread signaling this fence slot */
                fence = rcu_dereference_protected(*ptr, 1);
                RCU_INIT_POINTER(*ptr, NULL);
 
-               BUG_ON(!fence);
+               if (!fence)
+                       continue;
 
                r = fence_signal(fence);
                if (!r)
@@ -222,7 +231,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
                        BUG();
 
                fence_put(fence);
-       }
+       } while (last_seq != seq);
 }
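
The loop rework in amdgpu_fence_process() masks both sequence numbers against num_fences_mask once, up front, then walks every slot between them with a do/while; switching from BUG_ON(!fence) to a plain continue lets empty slots simply be skipped. A tiny standalone demo of the masked traversal (the ring size and sequence values are illustrative):

#include <stdio.h>

#define NUM_FENCES 8u                   /* must be a power of two */
#define FENCE_MASK (NUM_FENCES - 1)

int main(void)
{
        unsigned last_seq = 6, seq = 10; /* raw sequence numbers */

        if (seq == last_seq)
                return 0;               /* nothing new signaled */

        last_seq &= FENCE_MASK;         /* 6 */
        seq &= FENCE_MASK;              /* 2 */

        do {                            /* visits slots 7, 0, 1, 2 */
                last_seq = (last_seq + 1) & FENCE_MASK;
                printf("process fence slot %u\n", last_seq);
        } while (last_seq != seq);

        return 0;
}
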
 
 /**
index 46c3097c5224780c16868bc86aec5cbe5e79a45c..a31d7ef3032c76965e61a3f3d222938d55074286 100644 (file)
@@ -33,6 +33,8 @@
 #include "amdgpu.h"
 #include "atom.h"
 
+#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
+
 /*
  * IB
  * IBs (Indirect Buffers) are areas of GPU accessible memory where
@@ -122,7 +124,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        bool skip_preamble, need_ctx_switch;
        unsigned patch_offset = ~0;
        struct amdgpu_vm *vm;
-       struct fence *hwf;
        uint64_t ctx;
 
        unsigned i;
@@ -190,7 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (ring->funcs->emit_hdp_invalidate)
                amdgpu_ring_emit_hdp_invalidate(ring);
 
-       r = amdgpu_fence_emit(ring, &hwf);
+       r = amdgpu_fence_emit(ring, f);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                if (job && job->vm_id)
@@ -205,9 +206,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                                       AMDGPU_FENCE_FLAG_64BIT);
        }
 
-       if (f)
-               *f = fence_get(hwf);
-
        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
@@ -290,7 +288,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                if (!ring || !ring->ready)
                        continue;
 
-               r = amdgpu_ring_test_ib(ring);
+               r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
                if (r) {
                        ring->ready = false;
 
index aaee0c8f6731d568bb1fbffee8c139da0c309274..6674d40eb3abb251a1e9baae117f0c1a1850f940 100644 (file)
@@ -172,15 +172,13 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
        trace_amdgpu_sched_run_job(job);
        r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
                               job->sync.last_vm_update, job, &fence);
-       if (r) {
+       if (r)
                DRM_ERROR("Error scheduling IBs (%d)\n", r);
-               goto err;
-       }
 
-err:
        /* if gpu reset, hw fence will be replaced here */
        fence_put(job->fence);
-       job->fence = fence;
+       job->fence = fence_get(fence);
+       amdgpu_job_free_resources(job);
        return fence;
 }
 
index a8efbb54423f60740fb197f4bcd9a1d28761c9f1..d942654a1de0f08a4cf2970fce080cd9999b6807 100644 (file)
@@ -584,6 +584,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 
        amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
 
+       amdgpu_uvd_free_handles(adev, file_priv);
+       amdgpu_vce_free_handles(adev, file_priv);
+
        amdgpu_vm_fini(adev, &fpriv->vm);
 
        idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -608,10 +611,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 void amdgpu_driver_preclose_kms(struct drm_device *dev,
                                struct drm_file *file_priv)
 {
-       struct amdgpu_device *adev = dev->dev_private;
-
-       amdgpu_uvd_free_handles(adev, file_priv);
-       amdgpu_vce_free_handles(adev, file_priv);
 }
 
 /*
index 3b885e3e9b56c006e3881bc966b3f969c8c244cb..85aeb0a804bbcef1a5860f7bcca3ba2c822386b8 100644 (file)
@@ -75,6 +75,10 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
 
        ring->count_dw = ndw;
        ring->wptr_old = ring->wptr;
+
+       if (ring->funcs->begin_use)
+               ring->funcs->begin_use(ring);
+
        return 0;
 }
 
@@ -127,6 +131,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
 
        mb();
        amdgpu_ring_set_wptr(ring);
+
+       if (ring->funcs->end_use)
+               ring->funcs->end_use(ring);
 }
 
 /**
@@ -139,6 +146,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
 void amdgpu_ring_undo(struct amdgpu_ring *ring)
 {
        ring->wptr = ring->wptr_old;
+
+       if (ring->funcs->end_use)
+               ring->funcs->end_use(ring);
 }
 
 /**
@@ -198,7 +208,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
        ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
        ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
 
-       spin_lock_init(&ring->fence_lock);
        r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
        if (r) {
                dev_err(adev->dev, "failed initializing fences (%d).\n", r);
index 499803f3ce3acd5f4c0f5a43a491c83a27b6f980..0d8d65eb46cde8206824a12619eeeb5177a7cc16 100644 (file)
@@ -149,24 +149,26 @@ TRACE_EVENT(amdgpu_sched_run_job,
 
 
 TRACE_EVENT(amdgpu_vm_grab_id,
-           TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
-                    uint64_t pd_addr),
-           TP_ARGS(vm, ring, vmid, pd_addr),
+           TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
+           TP_ARGS(vm, ring, job),
            TP_STRUCT__entry(
                             __field(struct amdgpu_vm *, vm)
                             __field(u32, ring)
                             __field(u32, vmid)
                             __field(u64, pd_addr)
+                            __field(u32, needs_flush)
                             ),
 
            TP_fast_assign(
                           __entry->vm = vm;
                           __entry->ring = ring;
-                          __entry->vmid = vmid;
-                          __entry->pd_addr = pd_addr;
+                          __entry->vmid = job->vm_id;
+                          __entry->pd_addr = job->vm_pd_addr;
+                          __entry->needs_flush = job->vm_needs_flush;
                           ),
-           TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
-                     __entry->ring, __entry->vmid, __entry->pd_addr)
+           TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u",
+                     __entry->vm, __entry->ring, __entry->vmid,
+                     __entry->pd_addr, __entry->needs_flush)
 );
 
 TRACE_EVENT(amdgpu_vm_bo_map,
index e19520c4b4b6e3cd6bd4285c008885998e87187f..b11f4e8868d7652503713b508da2c8820a99c36b 100644 (file)
 #include "uvd/uvd_4_2_d.h"
 
 /* 1 second timeout */
-#define UVD_IDLE_TIMEOUT_MS    1000
+#define UVD_IDLE_TIMEOUT       msecs_to_jiffies(1000)
+
+/* Firmware versions for VI */
+#define FW_1_65_10     ((1 << 24) | (65 << 16) | (10 << 8))
+#define FW_1_87_11     ((1 << 24) | (87 << 16) | (11 << 8))
+#define FW_1_87_12     ((1 << 24) | (87 << 16) | (12 << 8))
+#define FW_1_37_15     ((1 << 24) | (37 << 16) | (15 << 8))
+
 /* Polaris10/11 firmware version */
-#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
+#define FW_1_66_16     ((1 << 24) | (66 << 16) | (16 << 8))
 
 /* Firmware Names */
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -92,7 +99,6 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
 MODULE_FIRMWARE(FIRMWARE_POLARIS10);
 MODULE_FIRMWARE(FIRMWARE_POLARIS11);
 
-static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
@@ -246,6 +252,23 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
                adev->uvd.address_64_bit = true;
 
+       switch (adev->asic_type) {
+       case CHIP_TONGA:
+               adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
+               break;
+       case CHIP_CARRIZO:
+               adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
+               break;
+       case CHIP_FIJI:
+               adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
+               break;
+       case CHIP_STONEY:
+               adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
+               break;
+       default:
+               adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
+       }
+
        return 0;
 }
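
The FW_x_y_z macros above pack a major.minor.patch firmware version into the top three bytes of a 32-bit word, so the plain >= comparisons in the switch order versions numerically. A standalone demo of the encoding (the version values are illustrative):

#include <stdio.h>
#include <stdint.h>

#define FW_VERSION(maj, min, pat) \
        (((uint32_t)(maj) << 24) | ((uint32_t)(min) << 16) | ((uint32_t)(pat) << 8))

int main(void)
{
        uint32_t running = FW_VERSION(1, 66, 2);

        /* 1.66.2 is newer than 1.65.10 but older than 1.87.11 */
        printf("%d\n", running >= FW_VERSION(1, 65, 10));       /* 1 */
        printf("%d\n", running >= FW_VERSION(1, 87, 11));       /* 0 */
        return 0;
}
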
 
@@ -346,8 +369,6 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
                if (handle != 0 && adev->uvd.filp[i] == filp) {
                        struct fence *fence;
 
-                       amdgpu_uvd_note_usage(adev);
-
                        r = amdgpu_uvd_get_destroy_msg(ring, handle,
                                                       false, &fence);
                        if (r) {
@@ -438,7 +459,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
        unsigned fs_in_mb = width_in_mb * height_in_mb;
 
        unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
-       unsigned min_ctx_size = 0;
+       unsigned min_ctx_size = ~0;
 
        image_size = width * height;
        image_size += image_size / 2;
@@ -557,7 +578,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
                /* reference picture buffer */
                min_dpb_size = image_size * num_dpb_buffer;
 
-               if (adev->asic_type < CHIP_POLARIS10){
+               if (!adev->uvd.use_ctx_buf) {
                        /* macroblock context buffer */
                        min_dpb_size +=
                                width_in_mb * height_in_mb * num_dpb_buffer * 192;
@@ -662,7 +683,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
                }
 
                DRM_ERROR("No more free UVD handles!\n");
-               return -EINVAL;
+               return -ENOSPC;
 
        case 1:
                /* it's a decode msg, calc buffer sizes */
@@ -913,8 +934,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
                return -EINVAL;
        }
 
-       amdgpu_uvd_note_usage(ctx.parser->adev);
-
        return 0;
 }
 
@@ -968,7 +987,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
        if (direct) {
                r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-               job->fence = f;
+               job->fence = fence_get(f);
                if (r)
                        goto err_free;
 
@@ -1110,16 +1129,14 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
                        amdgpu_asic_set_uvd_clocks(adev, 0, 0);
                }
        } else {
-               schedule_delayed_work(&adev->uvd.idle_work,
-                                     msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+               schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
        }
 }
 
-static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
+void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
        bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
-       set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
-                                           msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
 
        if (set_clocks) {
                if (adev->pm.dpm_enabled) {
@@ -1129,3 +1146,48 @@ static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
                }
        }
 }
+
+void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
+{
+       schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+}
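
The begin_use/end_use pair implements the usual idle-timer power gating: end_use (re)arms the delayed work, begin_use cancels it, and the block is powered back up only when the cancel found nothing pending, i.e. the idle handler has already run (or this is the first use). A plain-C model of that handoff, with a bool standing in for the pending delayed work (all names illustrative):

#include <stdio.h>
#include <stdbool.h>

static bool idle_pending;

static bool cancel_idle_work(void)      /* models cancel_delayed_work_sync() */
{
        bool was_pending = idle_pending;
        idle_pending = false;
        return was_pending;
}

static void ring_begin_use(void)
{
        bool set_clocks = !cancel_idle_work();

        if (set_clocks)
                printf("powering the block back up\n");
}

static void ring_end_use(void)          /* models schedule_delayed_work() */
{
        idle_pending = true;
}

int main(void)
{
        ring_begin_use();       /* nothing pending -> powers up */
        ring_end_use();
        ring_begin_use();       /* cancel succeeded -> already powered */
        return 0;
}
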
+
+/**
+ * amdgpu_uvd_ring_test_ib - test ib execution
+ *
+ * @ring: amdgpu_ring pointer
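+ * @timeout: how long to wait for the fence, in jiffies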
+ *
+ * Test if we can successfully execute an IB
+ */
+int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+       struct fence *fence;
+       long r;
+
+       r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+               goto error;
+       }
+
+       r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+               goto error;
+       }
+
+       r = fence_wait_timeout(fence, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out.\n");
+               r = -ETIMEDOUT;
+       } else if (r < 0) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+       } else {
+               DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
+               r = 0;
+       }
+
+error:
+       fence_put(fence);
+       return r;
+}
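
Every IB test converted in this series folds fence_wait_timeout()'s three-way long return the same way: 0 means the wait timed out, a negative value is an error to propagate, and a positive value is the jiffies remaining, i.e. success. A standalone sketch of that folding (wait_sketch() is a made-up stand-in for the real fence wait):

#include <stdio.h>

static long wait_sketch(void)
{
        return 42;              /* pretend 42 jiffies were left over */
}

int main(void)
{
        long r = wait_sketch();
        int ret;

        if (r == 0)
                ret = -1;       /* timed out (-ETIMEDOUT in the driver) */
        else if (r < 0)
                ret = (int)r;   /* propagate the fence error */
        else
                ret = 0;        /* fence signaled in time */

        printf("ret = %d\n", ret);
        return 0;
}
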
index 9a3b449081a72fd9c9ac79c9b3c9c5e28614d4ec..c850009602d176215b14dc2b173a0f6536a2860c 100644 (file)
@@ -35,5 +35,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
                             struct drm_file *filp);
 int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
+void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring);
+int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 
 #endif
index 875626a2eccbd912371e12f4f09e4573521e427c..05865ce35351919cda021e1d5d121dabeda6758e 100644 (file)
@@ -36,7 +36,7 @@
 #include "cikd.h"
 
 /* 1 second timeout */
-#define VCE_IDLE_TIMEOUT_MS    1000
+#define VCE_IDLE_TIMEOUT       msecs_to_jiffies(1000)
 
 /* Firmware Names */
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -85,8 +85,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
        unsigned ucode_version, version_major, version_minor, binary_id;
        int i, r;
 
-       INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
-
        switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
@@ -197,6 +195,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
                adev->vce.filp[i] = NULL;
        }
 
+       INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
+       mutex_init(&adev->vce.idle_mutex);
+
        return 0;
 }
 
@@ -220,6 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
        amdgpu_ring_fini(&adev->vce.ring[1]);
 
        release_firmware(adev->vce.fw);
+       mutex_destroy(&adev->vce.idle_mutex);
 
        return 0;
 }
@@ -310,37 +312,44 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
                        amdgpu_asic_set_vce_clocks(adev, 0, 0);
                }
        } else {
-               schedule_delayed_work(&adev->vce.idle_work,
-                                     msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
+               schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
        }
 }
 
 /**
- * amdgpu_vce_note_usage - power up VCE
+ * amdgpu_vce_ring_begin_use - power up VCE
  *
- * @adev: amdgpu_device pointer
+ * @ring: amdgpu ring
  *
  * Make sure VCE is powered up when we want to use it
  */
-static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
+void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
 {
-       bool streams_changed = false;
-       bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
-       set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
-                                           msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
-
-       if (adev->pm.dpm_enabled) {
-               /* XXX figure out if the streams changed */
-               streams_changed = false;
-       }
+       struct amdgpu_device *adev = ring->adev;
+       bool set_clocks;
 
-       if (set_clocks || streams_changed) {
+       mutex_lock(&adev->vce.idle_mutex);
+       set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
+       if (set_clocks) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_vce(adev, true);
                } else {
                        amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
                }
        }
+       mutex_unlock(&adev->vce.idle_mutex);
+}
+
+/**
+ * amdgpu_vce_ring_end_use - power VCE down
+ *
+ * @ring: amdgpu ring
+ *
+ * Schedule work to power VCE down again
+ */
+void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
+{
+       schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
 }
 
 /**
@@ -357,11 +366,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
        int i, r;
        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
                uint32_t handle = atomic_read(&adev->vce.handles[i]);
+
                if (!handle || adev->vce.filp[i] != filp)
                        continue;
 
-               amdgpu_vce_note_usage(adev);
-
                r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
                if (r)
                        DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
@@ -437,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                ib->ptr[i] = 0x0;
 
        r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-       job->fence = f;
+       job->fence = fence_get(f);
        if (r)
                goto err;
 
@@ -469,7 +477,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct fence *f = NULL;
-       uint64_t dummy;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -477,7 +484,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
 
        /* stitch together a VCE destroy msg */
        ib->length_dw = 0;
@@ -485,11 +491,14 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
        ib->ptr[ib->length_dw++] = handle;
 
-       ib->ptr[ib->length_dw++] = 0x00000014; /* len */
-       ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
-       ib->ptr[ib->length_dw++] = 0x00000001;
+       ib->ptr[ib->length_dw++] = 0x00000020; /* len */
+       ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+       ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
+       ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
+       ib->ptr[ib->length_dw++] = 0x00000000;
+       ib->ptr[ib->length_dw++] = 0x00000000;
+       ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed; set to 0xffffffff and the firmware will not output feedback */
+       ib->ptr[ib->length_dw++] = 0x00000000;
 
        ib->ptr[ib->length_dw++] = 0x00000008; /* len */
        ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
@@ -499,7 +508,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
        if (direct) {
                r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-               job->fence = f;
+               job->fence = fence_get(f);
                if (r)
                        goto err;
 
@@ -580,12 +589,10 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
  * we don't have another free session index.
  */
 static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
-                                     uint32_t handle, bool *allocated)
+                                     uint32_t handle, uint32_t *allocated)
 {
        unsigned i;
 
-       *allocated = false;
-
        /* validate the handle */
        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
                if (atomic_read(&p->adev->vce.handles[i]) == handle) {
@@ -602,7 +609,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
                if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
                        p->adev->vce.filp[i] = p->filp;
                        p->adev->vce.img_size[i] = 0;
-                       *allocated = true;
+                       *allocated |= 1 << i;
                        return i;
                }
        }
@@ -622,15 +629,13 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
        struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
        unsigned fb_idx = 0, bs_idx = 0;
        int session_idx = -1;
-       bool destroyed = false;
-       bool created = false;
-       bool allocated = false;
+       uint32_t destroyed = 0;
+       uint32_t created = 0;
+       uint32_t allocated = 0;
        uint32_t tmp, handle = 0;
        uint32_t *size = &tmp;
        int i, r = 0, idx = 0;
 
-       amdgpu_vce_note_usage(p->adev);
-
        while (idx < ib->length_dw) {
                uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
                uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
@@ -641,30 +646,30 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
                        goto out;
                }
 
-               if (destroyed) {
-                       DRM_ERROR("No other command allowed after destroy!\n");
-                       r = -EINVAL;
-                       goto out;
-               }
-
                switch (cmd) {
-               case 0x00000001: // session
+               case 0x00000001: /* session */
                        handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
                        session_idx = amdgpu_vce_validate_handle(p, handle,
                                                                 &allocated);
-                       if (session_idx < 0)
-                               return session_idx;
+                       if (session_idx < 0) {
+                               r = session_idx;
+                               goto out;
+                       }
                        size = &p->adev->vce.img_size[session_idx];
                        break;
 
-               case 0x00000002: // task info
+               case 0x00000002: /* task info */
                        fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
                        bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
                        break;
 
-               case 0x01000001: // create
-                       created = true;
-                       if (!allocated) {
+               case 0x01000001: /* create */
+                       created |= 1 << session_idx;
+                       if (destroyed & (1 << session_idx)) {
+                               destroyed &= ~(1 << session_idx);
+                               allocated |= 1 << session_idx;
+
+                       } else if (!(allocated & (1 << session_idx))) {
                                DRM_ERROR("Handle already in use!\n");
                                r = -EINVAL;
                                goto out;
@@ -675,16 +680,16 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
                                8 * 3 / 2;
                        break;
 
-               case 0x04000001: // config extension
-               case 0x04000002: // pic control
-               case 0x04000005: // rate control
-               case 0x04000007: // motion estimation
-               case 0x04000008: // rdo
-               case 0x04000009: // vui
-               case 0x05000002: // auxiliary buffer
+               case 0x04000001: /* config extension */
+               case 0x04000002: /* pic control */
+               case 0x04000005: /* rate control */
+               case 0x04000007: /* motion estimation */
+               case 0x04000008: /* rdo */
+               case 0x04000009: /* vui */
+               case 0x05000002: /* auxiliary buffer */
                        break;
 
-               case 0x03000001: // encode
+               case 0x03000001: /* encode */
                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
                                                *size, 0);
                        if (r)
@@ -696,18 +701,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
                                goto out;
                        break;
 
-               case 0x02000001: // destroy
-                       destroyed = true;
+               case 0x02000001: /* destroy */
+                       destroyed |= 1 << session_idx;
                        break;
 
-               case 0x05000001: // context buffer
+               case 0x05000001: /* context buffer */
                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
                                                *size * 2, 0);
                        if (r)
                                goto out;
                        break;
 
-               case 0x05000004: // video bitstream buffer
+               case 0x05000004: /* video bitstream buffer */
                        tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
                                                tmp, bs_idx);
@@ -715,7 +720,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
                                goto out;
                        break;
 
-               case 0x05000005: // feedback buffer
+               case 0x05000005: /* feedback buffer */
                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
                                                4096, fb_idx);
                        if (r)
@@ -737,21 +742,24 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
                idx += len / 4;
        }
 
-       if (allocated && !created) {
+       if (allocated & ~created) {
                DRM_ERROR("New session without create command!\n");
                r = -ENOENT;
        }
 
 out:
-       if ((!r && destroyed) || (r && allocated)) {
-               /*
-                * IB contains a destroy msg or we have allocated an
-                * handle and got an error, anyway free the handle
-                */
-               for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
-                       atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
+       if (!r) {
+               /* No error, free all destroyed handle slots */
+               tmp = destroyed;
+       } else {
+               /* Error during parsing, free all allocated handle slots */
+               tmp = allocated;
        }
 
+       for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+               if (tmp & (1 << i))
+                       atomic_set(&p->adev->vce.handles[i], 0);
+
        return r;
 }
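
The parse_cs rework above tracks created/allocated/destroyed as one-bit-per-session masks instead of single booleans, so a single IB may legitimately destroy one session and create another, and the cleanup at out: frees the destroyed slots on success or the freshly allocated ones on error. A standalone sketch of that bookkeeping (the session indices are illustrative; AMDGPU_MAX_VCE_HANDLES is 16 in the driver):

#include <stdio.h>
#include <stdint.h>

#define MAX_HANDLES 16

int main(void)
{
        uint32_t allocated = 0, created = 0, destroyed = 0;
        uint32_t to_free;
        int err = 0, i;

        allocated |= 1 << 3;            /* session 3 claimed a free slot */
        created   |= 1 << 3;            /* ...and carried a create cmd */
        destroyed |= 1 << 7;            /* session 7 carried a destroy cmd */

        if (allocated & ~created)       /* new session without create? */
                err = -1;

        to_free = err ? allocated : destroyed;
        for (i = 0; i < MAX_HANDLES; ++i)
                if (to_free & (1u << i))
                        printf("freeing session slot %d\n", i);
        return 0;
}
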
 
@@ -837,10 +845,10 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
  * @ring: the engine to test on
  *
  */
-int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
+int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct fence *fence = NULL;
-       int r;
+       long r;
 
        /* skip vce ring1 ib test for now, since it's not reliable */
        if (ring == &ring->adev->vce.ring[1])
@@ -848,21 +856,25 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
 
        r = amdgpu_vce_get_create_msg(ring, 1, NULL);
        if (r) {
-               DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
+               DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
                goto error;
        }
 
        r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
        if (r) {
-               DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
+               DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
                goto error;
        }
 
-       r = fence_wait(fence, false);
-       if (r) {
-               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+       r = fence_wait_timeout(fence, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out.\n");
+               r = -ETIMEDOUT;
+       } else if (r < 0) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
        } else {
                DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+               r = 0;
        }
 error:
        fence_put(fence);
index f40cf761c66f45f40fe5cf1d42646852d82101fb..63f83d0d985c0fc981de361ee746865c079373b5 100644 (file)
@@ -39,6 +39,8 @@ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                unsigned flags);
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
-int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring);
+int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
 
 #endif
index 2f8496d48c94d9d8a632795c7cca78b5517fd16d..8e642fc48df45c665053a3b835d0abe803d7124b 100644 (file)
@@ -195,6 +195,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_job *job)
 {
        struct amdgpu_device *adev = ring->adev;
+       uint64_t fence_context = adev->fence_context + ring->idx;
        struct fence *updates = sync->last_vm_update;
        struct amdgpu_vm_id *id, *idle;
        struct fence **fences;
@@ -254,7 +255,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        i = ring->idx;
        do {
                struct fence *flushed;
-               bool same_ring = ring->idx == i;
 
                id = vm->ids[i++];
                if (i == AMDGPU_MAX_RINGS)
@@ -272,8 +272,11 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                if (job->vm_pd_addr != id->pd_gpu_addr)
                        continue;
 
-               if (!same_ring &&
-                   (!id->last_flush || !fence_is_signaled(id->last_flush)))
+               if (!id->last_flush)
+                       continue;
+
+               if (id->last_flush->context != fence_context &&
+                   !fence_is_signaled(id->last_flush))
                        continue;
 
                flushed  = id->flushed_updates;
@@ -294,7 +297,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
                job->vm_id = id - adev->vm_manager.ids;
                job->vm_needs_flush = false;
-               trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
+               trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 
                mutex_unlock(&adev->vm_manager.lock);
                return 0;
@@ -325,7 +328,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        vm->ids[ring->idx] = id;
 
        job->vm_id = id - adev->vm_manager.ids;
-       trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
+       trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 
 error:
        mutex_unlock(&adev->vm_manager.lock);
index 48b6bd671cda80fbed1fa1dc6efe85ddc686eec3..c32eca26155c7be0c57d53c2763b015676269e41 100644 (file)
@@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
                case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
                        if (dig->backlight_level == 0)
                                amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
                                                                       ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
index 13cdb01e9b45022aebf6d052523d718d52ceaa91..bc56c8a181e628b575861233bd705dd986f9f061 100644 (file)
@@ -156,3 +156,18 @@ u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap)
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device *adev, u8 slave_addr, u8 line_number, u8 offset, u8 data)
+{
+       PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+       int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+
+       args.ucRegIndex = offset;
+       args.lpI2CDataOut = data;
+       args.ucFlag = 1;
+       args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+       args.ucTransBytes = 1;
+       args.ucSlaveAddr = slave_addr;
+       args.ucLineNumber = line_number;
+
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
index d6128d9de56e4b9db096de8e07d30d51e2e4a4f2..251aaf41f65d5a277104c3b9fda29fc71bbca680 100644 (file)
@@ -27,5 +27,7 @@
 int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
                      struct i2c_msg *msgs, int num);
 u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap);
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device *adev,
+               u8 slave_addr, u8 line_number, u8 offset, u8 data);
 
 #endif
index 5c33ed862695074112f6e9621917c2d598e3a620..e2f0e5d58d5cc05abd29ed65037e38a060fd3145 100644 (file)
@@ -86,12 +86,14 @@ static const struct ci_pt_defaults defaults_bonaire_xt =
        { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
 };
 
+#if 0
 static const struct ci_pt_defaults defaults_bonaire_pro =
 {
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
        { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
        { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
 };
+#endif
 
 static const struct ci_pt_defaults defaults_saturn_xt =
 {
@@ -100,12 +102,14 @@ static const struct ci_pt_defaults defaults_saturn_xt =
        { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
 };
 
+#if 0
 static const struct ci_pt_defaults defaults_saturn_pro =
 {
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
        { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
        { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
 };
+#endif
 
 static const struct ci_pt_config_reg didt_config_ci[] =
 {
@@ -3032,7 +3036,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
 
        if (pi->mclk_stutter_mode_threshold &&
            (memory_clock <= pi->mclk_stutter_mode_threshold) &&
-           (pi->uvd_enabled == false) &&
+           (!pi->uvd_enabled) &&
            (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
            (adev->pm.dpm.new_active_crtc_count <= 2))
                memory_level->StutterEnable = true;
index a7de4d18ac946c2f8da67ba16664ed6f956c985f..4efc901f658c0e60d1d6e96a11bb083e761c4140 100644 (file)
@@ -879,7 +879,7 @@ static void cik_vga_set_state(struct amdgpu_device *adev, bool state)
        uint32_t tmp;
 
        tmp = RREG32(mmCONFIG_CNTL);
-       if (state == false)
+       if (!state)
                tmp |= CONFIG_CNTL__VGA_DIS_MASK;
        else
                tmp &= ~CONFIG_CNTL__VGA_DIS_MASK;
index 46aca16a40aaa4d1abe63693da45ef524852541a..ee6466912497b7cfc608db429e144d3a2dc3387e 100644 (file)
@@ -354,7 +354,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
        u32 me_cntl;
        int i;
 
-       if (enable == false) {
+       if (!enable) {
                cik_sdma_gfx_stop(adev);
                cik_sdma_rlc_stop(adev);
        }
@@ -617,20 +617,19 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
  * Test a simple IB in the DMA ring (CIK).
  * Returns 0 on success, error on failure.
  */
-static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
+static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct fence *f = NULL;
-       unsigned i;
        unsigned index;
-       int r;
        u32 tmp = 0;
        u64 gpu_addr;
+       long r;
 
        r = amdgpu_wb_get(adev, &index);
        if (r) {
-               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
                return r;
        }
 
@@ -640,11 +639,12 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
        if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err0;
        }
 
-       ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+       ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+                               SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
        ib.ptr[1] = lower_32_bits(gpu_addr);
        ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = 1;
@@ -654,28 +654,25 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
        if (r)
                goto err1;
 
-       r = fence_wait(f, false);
-       if (r) {
-               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+       r = fence_wait_timeout(f, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out\n");
+               r = -ETIMEDOUT;
                goto err1;
-       }
-       for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = le32_to_cpu(adev->wb.wb[index]);
-               if (tmp == 0xDEADBEEF)
-                       break;
-               DRM_UDELAY(1);
-       }
-       if (i < adev->usec_timeout) {
-               DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-                        ring->idx, i);
+       } else if (r < 0) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err1;
+       }
+       tmp = le32_to_cpu(adev->wb.wb[index]);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+               r = 0;
        } else {
                DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }
 
 err1:
-       fence_put(f);
        amdgpu_ib_free(adev, &ib, NULL);
        fence_put(f);
 err0:
index 8ba07e79d4cb17317cebd37811b24bcf046ef847..2a11413ed54a763cb1f2f0befa39eaacccc109c9 100644 (file)
@@ -425,7 +425,7 @@ static int cz_dpm_init(struct amdgpu_device *adev)
        pi->mgcg_cgtt_local1 = 0x0;
        pi->clock_slow_down_step = 25000;
        pi->skip_clock_slow_down = 1;
-       pi->enable_nb_ps_policy = 0;
+       pi->enable_nb_ps_policy = false;
        pi->caps_power_containment = true;
        pi->caps_cac = true;
        pi->didt_enabled = false;
index b336c918d6a7984fcb8bb6060bb2339ebb78e636..b3e19ba4c57f4b0f5e1853f476c90b899e3888bb 100644 (file)
@@ -173,7 +173,7 @@ static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
 {
        if (!fiji_is_smc_ram_running(adev))
        {
-               return -EINVAL;;
+               return -EINVAL;
        }
 
        if (wait_smu_response(adev)) {
index f6bd9465dbdcdd9eee4062ffae2ead68dc6070bc..d869d058ef24f1324ff861db8318e5a3c12f8081 100644 (file)
@@ -2105,26 +2105,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
  * Provides a basic gfx ring test to verify that IBs are working.
  * Returns 0 on success, error on failure.
  */
-static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
+static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct fence *f = NULL;
        uint32_t scratch;
        uint32_t tmp = 0;
-       unsigned i;
-       int r;
+       long r;
 
        r = amdgpu_gfx_scratch_get(adev, &scratch);
        if (r) {
-               DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
+               DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
        if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err1;
        }
        ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -2136,21 +2135,19 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
        if (r)
                goto err2;
 
-       r = fence_wait(f, false);
-       if (r) {
-               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+       r = fence_wait_timeout(f, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out\n");
+               r = -ETIMEDOUT;
                goto err2;
-       }
-       for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = RREG32(scratch);
-               if (tmp == 0xDEADBEEF)
-                       break;
-               DRM_UDELAY(1);
-       }
-       if (i < adev->usec_timeout) {
-               DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-                        ring->idx, i);
+       } else if (r < 0) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err2;
+       }
+       tmp = RREG32(scratch);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+               r = 0;
        } else {
                DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
                          scratch, tmp);
@@ -2158,7 +2155,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
        }
 
 err2:
-       fence_put(f);
        amdgpu_ib_free(adev, &ib, NULL);
        fence_put(f);
 err1:
index c30b6ac25d8984f13ba70c1f962964a280373fe7..bff8668e9e6d466e059d70f719c0a818235040dc 100644 (file)
@@ -28,6 +28,7 @@
 #include "vid.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_atombios.h"
+#include "atombios_i2c.h"
 #include "clearstate_vi.h"
 
 #include "gmc/gmc_8_2_d.h"
@@ -47,6 +48,8 @@
 #include "dce/dce_10_0_d.h"
 #include "dce/dce_10_0_sh_mask.h"
 
+#include "smu/smu_7_1_3_d.h"
+
 #define GFX8_NUM_GFX_RINGS     1
 #define GFX8_NUM_COMPUTE_RINGS 8
 
@@ -282,6 +285,7 @@ static const u32 golden_settings_polaris11_a11[] =
        mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
        mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
        mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
+       mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 };
 
 static const u32 polaris11_golden_common_all[] =
@@ -312,6 +316,7 @@ static const u32 golden_settings_polaris10_a11[] =
        mmTCC_CTRL, 0x00100000, 0xf31fff7f,
        mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
        mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+       mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 };
 
 static const u32 polaris10_golden_common_all[] =
@@ -693,6 +698,11 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
                amdgpu_program_register_sequence(adev,
                                                 polaris10_golden_common_all,
                                                 (const u32)ARRAY_SIZE(polaris10_golden_common_all));
+               WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
+               if (adev->pdev->revision == 0xc7) {
+                       amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
+                       amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
+               }
                break;
        case CHIP_CARRIZO:
                amdgpu_program_register_sequence(adev,
@@ -777,26 +787,25 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
        return r;
 }
 
-static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
+static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct fence *f = NULL;
        uint32_t scratch;
        uint32_t tmp = 0;
-       unsigned i;
-       int r;
+       long r;
 
        r = amdgpu_gfx_scratch_get(adev, &scratch);
        if (r) {
-               DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
+               DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
        if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err1;
        }
        ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -808,28 +817,25 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
        if (r)
                goto err2;
 
-       r = fence_wait(f, false);
-       if (r) {
-               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+       r = fence_wait_timeout(f, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out.\n");
+               r = -ETIMEDOUT;
                goto err2;
-       }
-       for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = RREG32(scratch);
-               if (tmp == 0xDEADBEEF)
-                       break;
-               DRM_UDELAY(1);
-       }
-       if (i < adev->usec_timeout) {
-               DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-                        ring->idx, i);
+       } else if (r < 0) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err2;
+       }
+       tmp = RREG32(scratch);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+               r = 0;
        } else {
                DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
                          scratch, tmp);
                r = -EINVAL;
        }
 err2:
-       fence_put(f);
        amdgpu_ib_free(adev, &ib, NULL);
        fence_put(f);
 err1:
@@ -1719,7 +1725,6 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
                RREG32(sec_ded_counter_registers[i]);
 
 fail:
-       fence_put(f);
        amdgpu_ib_free(adev, &ib, NULL);
        fence_put(f);
 
index 825ccd63f2dc313ced6199d476f0b96d8b8cdba6..2f078ad6095caa946393d0de03b40d83e5f622b4 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/firmware.h>
 #include "drmP.h"
 #include "amdgpu.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
 
 MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
 
index 52ee08193295f434c46fb7f89e082b20df46abfd..211839913728e72e496c7edffedd57c51aaf28ff 100644 (file)
@@ -25,7 +25,7 @@
 #include "drmP.h"
 #include "amdgpu.h"
 #include "ppsmc.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
 #include "smu_ucode_xfer_vi.h"
 #include "amdgpu_ucode.h"
 
@@ -211,7 +211,7 @@ static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
                                                   PPSMC_Msg msg)
 {
        if (!iceland_is_smc_ram_running(adev))
-               return -EINVAL;;
+               return -EINVAL;
 
        if (wait_smu_response(adev)) {
                DRM_ERROR("Failed to send previous message\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
new file mode 100644 (file)
index 0000000..5983e31
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef ICELAND_SMUM_H
+#define ICELAND_SMUM_H
+
+#include "ppsmc.h"
+
+extern int iceland_smu_init(struct amdgpu_device *adev);
+extern int iceland_smu_fini(struct amdgpu_device *adev);
+extern int iceland_smu_start(struct amdgpu_device *adev);
+
+struct iceland_smu_private_data
+{
+       uint8_t *header;
+       uint8_t *mec_image;
+       uint32_t header_addr_high;
+       uint32_t header_addr_low;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h b/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h
deleted file mode 100644 (file)
index 1e0769e..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef ICELAND_SMUMGR_H
-#define ICELAND_SMUMGR_H
-
-#include "ppsmc.h"
-
-extern int iceland_smu_init(struct amdgpu_device *adev);
-extern int iceland_smu_fini(struct amdgpu_device *adev);
-extern int iceland_smu_start(struct amdgpu_device *adev);
-
-struct iceland_smu_private_data
-{
-       uint8_t *header;
-       uint8_t *mec_image;
-       uint32_t header_addr_high;
-       uint32_t header_addr_low;
-};
-
-#endif
index 5a0e245771ce4accc8d7911ca061fc9658a10d15..a845e883f5fa560a18340517edbaa2df8080cf30 100644 (file)
@@ -191,6 +191,7 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
        vid_mapping_table->num_entries = i;
 }
 
+#if 0
 static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
 {
        {  0,       4,        1    },
@@ -289,6 +290,7 @@ static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
 {
        { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
 };
+#endif
 
 static const struct kv_pt_config_reg didt_config_kv[] =
 {
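
Editor's note: the hunk above (apparently kv_dpm.c) parks the local-CAC configuration tables inside an #if 0 block rather than deleting them: the data stays in the source for reference, but the preprocessor drops it, so no object code or unused-variable warnings result. A tiny standalone illustration of the idiom:

    #include <stdio.h>

    #if 0
    static const int legacy_cac_table[] = { 0, 4, 1 };  /* invisible to the compiler */
    #endif

    static const int didt_table[] = { 1, 2, 3 };        /* still compiled and usable */

    int main(void)
    {
            printf("%d\n", didt_table[0]);
            return 0;
    }
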
index 7837f2ecc3570167392199e2fa886cda6bc86cc0..8463245f424ff75da034dea697cfc9e8f759a29b 100644 (file)
@@ -90,7 +90,9 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_StartFanControl               ((uint8_t)0x5B)
 #define PPSMC_StopFanControl                ((uint8_t)0x5C)
 #define PPSMC_MSG_NoDisplay                 ((uint8_t)0x5D)
+#define PPSMC_NoDisplay                     ((uint8_t)0x5D)
 #define PPSMC_MSG_HasDisplay                ((uint8_t)0x5E)
+#define PPSMC_HasDisplay                    ((uint8_t)0x5E)
 #define PPSMC_MSG_UVDPowerOFF               ((uint8_t)0x60)
 #define PPSMC_MSG_UVDPowerON                ((uint8_t)0x61)
 #define PPSMC_MSG_EnableULV                 ((uint8_t)0x62)
@@ -108,6 +110,7 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_MSG_DisableDTE                ((uint8_t)0x88)
 #define PPSMC_MSG_ThrottleOVRDSCLKDS        ((uint8_t)0x96)
 #define PPSMC_MSG_CancelThrottleOVRDSCLKDS  ((uint8_t)0x97)
+#define PPSMC_MSG_EnableACDCGPIOInterrupt   ((uint16_t) 0x149)
 
 /* CI/KV/KB */
 #define PPSMC_MSG_UVDDPM_SetEnabledMask       ((uint16_t) 0x12D)
@@ -161,6 +164,7 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_MSG_MASTER_DeepSleep_OFF        ((uint16_t) 0x190)
 #define PPSMC_MSG_Remove_DC_Clamp             ((uint16_t) 0x191)
 #define PPSMC_MSG_SetFanPwmMax                ((uint16_t) 0x19A)
+#define PPSMC_MSG_SetFanRpmMax                ((uint16_t) 0x205)
 
 #define PPSMC_MSG_ENABLE_THERMAL_DPM          ((uint16_t) 0x19C)
 #define PPSMC_MSG_DISABLE_THERMAL_DPM         ((uint16_t) 0x19D)
index ac3730a6e49f9cfe62e803b4038d83575535e453..1351c7e834a21653a358ad3578dad91f60de9da9 100644 (file)
@@ -393,7 +393,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
        u32 f32_cntl;
        int i;
 
-       if (enable == false) {
+       if (!enable) {
                sdma_v2_4_gfx_stop(adev);
                sdma_v2_4_rlc_stop(adev);
        }
@@ -567,19 +567,21 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
 {
        int r;
 
-       if (!adev->firmware.smu_load) {
-               r = sdma_v2_4_load_microcode(adev);
-               if (r)
-                       return r;
-       } else {
-               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-                                               AMDGPU_UCODE_ID_SDMA0);
-               if (r)
-                       return -EINVAL;
-               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-                                               AMDGPU_UCODE_ID_SDMA1);
-               if (r)
-                       return -EINVAL;
+       if (!adev->pp_enabled) {
+               if (!adev->firmware.smu_load) {
+                       r = sdma_v2_4_load_microcode(adev);
+                       if (r)
+                               return r;
+               } else {
+                       r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                                       AMDGPU_UCODE_ID_SDMA0);
+                       if (r)
+                               return -EINVAL;
+                       r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                                       AMDGPU_UCODE_ID_SDMA1);
+                       if (r)
+                               return -EINVAL;
+               }
        }
 
        /* halt the engine before programing */
@@ -666,20 +668,19 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
  * Test a simple IB in the DMA ring (VI).
  * Returns 0 on success, error on failure.
  */
-static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
+static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct fence *f = NULL;
-       unsigned i;
        unsigned index;
-       int r;
        u32 tmp = 0;
        u64 gpu_addr;
+       long r;
 
        r = amdgpu_wb_get(adev, &index);
        if (r) {
-               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
                return r;
        }
 
@@ -689,7 +690,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
        if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err0;
        }
 
@@ -708,28 +709,25 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
        if (r)
                goto err1;
 
-       r = fence_wait(f, false);
-       if (r) {
-               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+       r = fence_wait_timeout(f, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out\n");
+               r = -ETIMEDOUT;
                goto err1;
-       }
-       for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = le32_to_cpu(adev->wb.wb[index]);
-               if (tmp == 0xDEADBEEF)
-                       break;
-               DRM_UDELAY(1);
-       }
-       if (i < adev->usec_timeout) {
-               DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-                        ring->idx, i);
+       } else if (r) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err1;
+       }
+       tmp = le32_to_cpu(adev->wb.wb[index]);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+               r = 0;
        } else {
                DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }
 
 err1:
-       fence_put(f);
        amdgpu_ib_free(adev, &ib, NULL);
        fence_put(f);
 err0:
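
Editor's note: this section (apparently sdma_v2_4.c) converts the IB test from an open-coded DEADBEEF polling loop to fence_wait_timeout(), which folds the timeout into the wait itself: a return of 0 means the fence never signalled, a negative value is an error, and a positive value is time remaining. The loop counter and microsecond polling disappear, the return type widens to long to carry the timeout result, and the duplicated fence_put() before err1 is dropped so the fence is released exactly once. A standalone sketch of the three-way return convention, using a stub wait function rather than the kernel fence API:

    #include <stdio.h>
    #include <errno.h>

    /* Stub following the fence_wait_timeout() convention:
     * 0 = timed out, <0 = error, >0 = time remaining. */
    static long stub_wait_timeout(long timeout) { return timeout / 2; }

    static int run_ib_test(long timeout)
    {
            long r = stub_wait_timeout(timeout);

            if (r == 0)          /* never signalled within the budget */
                    return -ETIMEDOUT;
            else if (r < 0)      /* interrupted or other error: propagate */
                    return (int)r;
            return 0;            /* signalled with time to spare */
    }

    int main(void)
    {
            printf("result: %d\n", run_ib_test(100));
            return 0;
    }

The sdma_v3_0.c section that follows applies the same conversion to the v3.0 engine.
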
index f00db6f4c04cd6c283f1176364c8a0dcdbba3f74..653ce5ed55aef6641a83649cd1447a06fcf57437 100644 (file)
@@ -604,7 +604,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
        u32 f32_cntl;
        int i;
 
-       if (enable == false) {
+       if (!enable) {
                sdma_v3_0_gfx_stop(adev);
                sdma_v3_0_rlc_stop(adev);
        }
@@ -896,20 +896,19 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
  * Test a simple IB in the DMA ring (VI).
  * Returns 0 on success, error on failure.
  */
-static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
+static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct fence *f = NULL;
-       unsigned i;
        unsigned index;
-       int r;
        u32 tmp = 0;
        u64 gpu_addr;
+       long r;
 
        r = amdgpu_wb_get(adev, &index);
        if (r) {
-               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
                return r;
        }
 
@@ -919,7 +918,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
        if (r) {
-               DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err0;
        }
 
@@ -938,27 +937,24 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
        if (r)
                goto err1;
 
-       r = fence_wait(f, false);
-       if (r) {
-               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+       r = fence_wait_timeout(f, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out\n");
+               r = -ETIMEDOUT;
                goto err1;
-       }
-       for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = le32_to_cpu(adev->wb.wb[index]);
-               if (tmp == 0xDEADBEEF)
-                       break;
-               DRM_UDELAY(1);
-       }
-       if (i < adev->usec_timeout) {
-               DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-                        ring->idx, i);
+       } else if (r < 0) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err1;
+       }
+       tmp = le32_to_cpu(adev->wb.wb[index]);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+               r = 0;
        } else {
                DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }
 err1:
-       fence_put(f);
        amdgpu_ib_free(adev, &ib, NULL);
        fence_put(f);
 err0:
index 083893dd68c063db6059fee335a8667247132b14..940de1836f8f0112cf01989c34ced549ce90cc0a 100644 (file)
@@ -173,7 +173,7 @@ static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
 {
        if (!tonga_is_smc_ram_running(adev))
        {
-               return -EINVAL;;
+               return -EINVAL;
        }
 
        if (wait_smu_response(adev)) {
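
Editor's note: beyond the doubled-semicolon fix, this context shows the general shape of SMC messaging that the PPSMC_MSG_* opcodes above feed into: refuse to send while the SMC microcontroller is not running, then wait for the previous response before posting a new opcode. A hedged standalone sketch of that mailbox discipline, with stubbed accessors in place of the real register I/O (the error codes chosen here are illustrative):

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>
    #include <stdbool.h>

    static bool smc_running = true;     /* stand-in for tonga_is_smc_ram_running() */
    static uint32_t mailbox;            /* stand-in for the message register */

    static int wait_smu_response(void)  /* pretend the previous message completed */
    {
            return 0;
    }

    static int send_msg_to_smc(uint8_t msg)
    {
            if (!smc_running)
                    return -EINVAL;           /* SMC firmware not up: nobody to talk to */
            if (wait_smu_response())
                    return -EBUSY;            /* previous opcode still in flight */
            mailbox = msg;                    /* post the new opcode */
            return wait_smu_response();       /* block until the SMC acknowledges */
    }

    int main(void)
    {
            printf("send: %d (mailbox=0x%x)\n", send_msg_to_smc(0x5D),
                   (unsigned)mailbox);
            return 0;
    }
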
index 416c8567d3ed1054c2b4be75b8f83d57b9da5604..132e613ed67425132ecf8a770ccb4888d8525b81 100644 (file)
@@ -526,49 +526,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, ib->length_dw);
 }
 
-/**
- * uvd_v4_2_ring_test_ib - test ib execution
- *
- * @ring: amdgpu_ring pointer
- *
- * Test if we can successfully execute an IB
- */
-static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
-{
-       struct amdgpu_device *adev = ring->adev;
-       struct fence *fence = NULL;
-       int r;
-
-       r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
-               return r;
-       }
-
-       r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
-               goto error;
-       }
-
-       r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
-               goto error;
-       }
-
-       r = fence_wait(fence, false);
-       if (r) {
-               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-               goto error;
-       }
-       DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
-error:
-       fence_put(fence);
-       amdgpu_asic_set_uvd_clocks(adev, 0, 0);
-       return r;
-}
-
 /**
  * uvd_v4_2_mc_resume - memory controller programming
  *
@@ -794,9 +751,11 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
        .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
        .test_ring = uvd_v4_2_ring_test_ring,
-       .test_ib = uvd_v4_2_ring_test_ib,
+       .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .begin_use = amdgpu_uvd_ring_begin_use,
+       .end_use = amdgpu_uvd_ring_end_use,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
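
Editor's note: the uvd_v4_2 hunks retire the per-ASIC uvd_v4_2_ring_test_ib() in favour of a shared amdgpu_uvd_ring_test_ib(), and add begin_use/end_use callbacks to the ring function table; the clock raising the old test did inline presumably moves into those hooks, so it happens around every use of the ring, not just the test. A minimal sketch of an ops table with optional bracketing hooks invoked around an operation (all names here are illustrative):

    #include <stdio.h>

    struct ring_funcs {
            int  (*test_ib)(void);
            void (*begin_use)(void);   /* optional: e.g. raise clocks before use */
            void (*end_use)(void);     /* optional: drop clocks afterwards */
    };

    static int  shared_test_ib(void) { return 0; }
    static void uvd_begin(void)      { printf("clocks up\n"); }
    static void uvd_end(void)        { printf("clocks down\n"); }

    static const struct ring_funcs uvd_ring_funcs = {
            .test_ib   = shared_test_ib,
            .begin_use = uvd_begin,
            .end_use   = uvd_end,
    };

    static int run_test(const struct ring_funcs *f)
    {
            int r;

            if (f->begin_use)
                    f->begin_use();
            r = f->test_ib();
            if (f->end_use)
                    f->end_use();
            return r;
    }

    int main(void) { return run_test(&uvd_ring_funcs); }

The uvd_v5_0 section below makes the identical substitution for the v5.0 block.
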
index dd636c4c4b089d68593f1ffbddd05fab452d662c..101de136ba63aacdf1034e6fd5237bf0cbe23011 100644 (file)
@@ -577,49 +577,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, ib->length_dw);
 }
 
-/**
- * uvd_v5_0_ring_test_ib - test ib execution
- *
- * @ring: amdgpu_ring pointer
- *
- * Test if we can successfully execute an IB
- */
-static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
-{
-       struct amdgpu_device *adev = ring->adev;
-       struct fence *fence = NULL;
-       int r;
-
-       r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
-               return r;
-       }
-
-       r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
-               goto error;
-       }
-
-       r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
-               goto error;
-       }
-
-       r = fence_wait(fence, false);
-       if (r) {
-               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-               goto error;
-       }
-       DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
-error:
-       fence_put(fence);
-       amdgpu_asic_set_uvd_clocks(adev, 0, 0);
-       return r;
-}
-
 static bool uvd_v5_0_is_idle(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -845,9 +802,11 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
        .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v5_0_ring_test_ring,
-       .test_ib = uvd_v5_0_ring_test_ib,
+       .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .begin_use = amdgpu_uvd_ring_begin_use,
+       .end_use = amdgpu_uvd_ring_end_use,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
index 07e9a987fbee413f6d21d9323dbd8045b7ac4d97..7f21102bfb99d9145ee118d6800e8abb48d1a934 100644 (file)
@@ -34,6 +34,7 @@
 #include "smu/smu_7_1_3_d.h"
 #include "smu/smu_7_1_3_sh_mask.h"
 #include "bif/bif_5_1_d.h"
+#include "gmc/gmc_8_1_d.h"
 #include "vi.h"
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -672,6 +673,9 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
                                  unsigned vm_id, bool ctx_switch)
 {
+       amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
+       amdgpu_ring_write(ring, vm_id);
+
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
@@ -680,39 +684,55 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, ib->length_dw);
 }
 
-/**
- * uvd_v6_0_ring_test_ib - test ib execution
- *
- * @ring: amdgpu_ring pointer
- *
- * Test if we can successfully execute an IB
- */
-static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
+static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+                                        unsigned vm_id, uint64_t pd_addr)
 {
-       struct fence *fence = NULL;
-       int r;
+       uint32_t reg;
 
-       r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
-               goto error;
-       }
+       if (vm_id < 8)
+               reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
+       else
+               reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
 
-       r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
-       if (r) {
-               DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
-               goto error;
-       }
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+       amdgpu_ring_write(ring, reg << 2);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+       amdgpu_ring_write(ring, pd_addr >> 12);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+       amdgpu_ring_write(ring, 0x8);
 
-       r = fence_wait(fence, false);
-       if (r) {
-               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-               goto error;
-       }
-       DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
-error:
-       fence_put(fence);
-       return r;
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+       amdgpu_ring_write(ring, 1 << vm_id);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+       amdgpu_ring_write(ring, 0x8);
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
+       amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+       amdgpu_ring_write(ring, 0xC);
+}
+
+static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+       uint32_t seq = ring->fence_drv.sync_seq;
+       uint64_t addr = ring->fence_drv.gpu_addr;
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+       amdgpu_ring_write(ring, lower_32_bits(addr));
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
+       amdgpu_ring_write(ring, 0xffffffff); /* mask */
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
+       amdgpu_ring_write(ring, seq);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+       amdgpu_ring_write(ring, 0xE);
 }
 
 static bool uvd_v6_0_is_idle(void *handle)
@@ -951,7 +971,7 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
        .set_powergating_state = uvd_v6_0_set_powergating_state,
 };
 
-static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
+static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
@@ -961,14 +981,41 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v6_0_ring_test_ring,
-       .test_ib = uvd_v6_0_ring_test_ib,
+       .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .begin_use = amdgpu_uvd_ring_begin_use,
+       .end_use = amdgpu_uvd_ring_end_use,
+};
+
+static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+       .get_rptr = uvd_v6_0_ring_get_rptr,
+       .get_wptr = uvd_v6_0_ring_get_wptr,
+       .set_wptr = uvd_v6_0_ring_set_wptr,
+       .parse_cs = NULL,
+       .emit_ib = uvd_v6_0_ring_emit_ib,
+       .emit_fence = uvd_v6_0_ring_emit_fence,
+       .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
+       .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
+       .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
+       .test_ring = uvd_v6_0_ring_test_ring,
+       .test_ib = amdgpu_uvd_ring_test_ib,
+       .insert_nop = amdgpu_ring_insert_nop,
+       .pad_ib = amdgpu_ring_generic_pad_ib,
+       .begin_use = amdgpu_uvd_ring_begin_use,
+       .end_use = amdgpu_uvd_ring_end_use,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs;
+       if (adev->asic_type >= CHIP_POLARIS10) {
+               adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
+               DRM_INFO("UVD is enabled in VM mode\n");
+       } else {
+               adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
+               DRM_INFO("UVD is enabled in physical mode\n");
+       }
 }
 
 static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
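
Editor's note: uvd_v6_0 now carries two ring-function tables and picks one at init time: Polaris-and-later parts get the VM variant with emit_vm_flush and emit_pipeline_sync wired in, while older ASICs keep the physical-addressing table. A small sketch of this select-an-ops-table-by-generation pattern:

    #include <stdio.h>

    enum chip { CHIP_TONGA = 1, CHIP_POLARIS10 = 2 };

    struct ring_funcs { const char *mode; };

    static const struct ring_funcs phys_funcs = { .mode = "physical" };
    static const struct ring_funcs vm_funcs   = { .mode = "VM" };

    static const struct ring_funcs *pick_funcs(enum chip asic_type)
    {
            /* newer ASICs gain the extra emit_* hooks; older ones keep the
             * legacy table, mirroring uvd_v6_0_set_ring_funcs() above */
            return asic_type >= CHIP_POLARIS10 ? &vm_funcs : &phys_funcs;
    }

    int main(void)
    {
            printf("UVD is enabled in %s mode\n", pick_funcs(CHIP_POLARIS10)->mode);
            return 0;
    }
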
index 45d92aceb485f7477ec80c4d31aca52228c2c20a..80a37a60218136b5912ec7db9be5e8d21fb9ebd0 100644 (file)
@@ -594,6 +594,8 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
        .test_ib = amdgpu_vce_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .begin_use = amdgpu_vce_ring_begin_use,
+       .end_use = amdgpu_vce_ring_end_use,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
index 30e8099e94c5629ca6d7703526b20bf0a715267c..c271abffd8dd7475ea0b25101a86d1b32b53696c 100644 (file)
@@ -43,6 +43,7 @@
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0        0x8616
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1        0x8617
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2        0x8618
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK  0x02
 
 #define VCE_V3_0_FW_SIZE       (384 * 1024)
 #define VCE_V3_0_STACK_SIZE    (64 * 1024)
@@ -51,6 +52,7 @@
 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
+static int vce_v3_0_wait_for_idle(void *handle);
 
 /**
  * vce_v3_0_ring_get_rptr - get read pointer
@@ -205,6 +207,32 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
        vce_v3_0_override_vce_clock_gating(adev, false);
 }
 
+static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
+{
+       int i, j;
+
+       for (i = 0; i < 10; ++i) {
+               for (j = 0; j < 100; ++j) {
+                       uint32_t status = RREG32(mmVCE_STATUS);
+
+                       if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+                               return 0;
+                       mdelay(10);
+               }
+
+               DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+               WREG32_P(mmVCE_SOFT_RESET,
+                       VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+                       ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+               mdelay(10);
+               WREG32_P(mmVCE_SOFT_RESET, 0,
+                       ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+               mdelay(10);
+       }
+
+       return -ETIMEDOUT;
+}
+
 /**
  * vce_v3_0_start - start VCE block
  *
@@ -215,11 +243,24 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
 static int vce_v3_0_start(struct amdgpu_device *adev)
 {
        struct amdgpu_ring *ring;
-       int idx, i, j, r;
+       int idx, r;
+
+       ring = &adev->vce.ring[0];
+       WREG32(mmVCE_RB_RPTR, ring->wptr);
+       WREG32(mmVCE_RB_WPTR, ring->wptr);
+       WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+       WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+       WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+       ring = &adev->vce.ring[1];
+       WREG32(mmVCE_RB_RPTR2, ring->wptr);
+       WREG32(mmVCE_RB_WPTR2, ring->wptr);
+       WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+       WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+       WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (idx = 0; idx < 2; ++idx) {
-
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
@@ -233,48 +274,24 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 
                vce_v3_0_mc_resume(adev, idx);
 
-               /* set BUSY flag */
-               WREG32_P(mmVCE_STATUS, 1, ~1);
+               WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
+                        ~VCE_STATUS__JOB_BUSY_MASK);
+
                if (adev->asic_type >= CHIP_STONEY)
                        WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
                else
                        WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
                                ~VCE_VCPU_CNTL__CLK_EN_MASK);
 
-               WREG32_P(mmVCE_SOFT_RESET,
-                        VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
-               mdelay(100);
-
                WREG32_P(mmVCE_SOFT_RESET, 0,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
 
-               for (i = 0; i < 10; ++i) {
-                       uint32_t status;
-                       for (j = 0; j < 100; ++j) {
-                               status = RREG32(mmVCE_STATUS);
-                               if (status & 2)
-                                       break;
-                               mdelay(10);
-                       }
-                       r = 0;
-                       if (status & 2)
-                               break;
-
-                       DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
-                       WREG32_P(mmVCE_SOFT_RESET,
-                               VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-                               ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-                       mdelay(10);
-                       WREG32_P(mmVCE_SOFT_RESET, 0,
-                               ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-                       mdelay(10);
-                       r = -1;
-               }
+               mdelay(100);
+
+               r = vce_v3_0_firmware_loaded(adev);
 
                /* clear BUSY flag */
-               WREG32_P(mmVCE_STATUS, 0, ~1);
+               WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
 
                /* Set Clock-Gating off */
                if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
@@ -290,19 +307,46 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
        WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
        mutex_unlock(&adev->grbm_idx_mutex);
 
-       ring = &adev->vce.ring[0];
-       WREG32(mmVCE_RB_RPTR, ring->wptr);
-       WREG32(mmVCE_RB_WPTR, ring->wptr);
-       WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+       return 0;
+}
 
-       ring = &adev->vce.ring[1];
-       WREG32(mmVCE_RB_RPTR2, ring->wptr);
-       WREG32(mmVCE_RB_WPTR2, ring->wptr);
-       WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+static int vce_v3_0_stop(struct amdgpu_device *adev)
+{
+       int idx;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (idx = 0; idx < 2; ++idx) {
+               if (adev->vce.harvest_config & (1 << idx))
+                       continue;
+
+               if (idx == 0)
+                       WREG32_P(mmGRBM_GFX_INDEX, 0,
+                               ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+               else
+                       WREG32_P(mmGRBM_GFX_INDEX,
+                               GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
+                               ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+
+               if (adev->asic_type >= CHIP_STONEY)
+                       WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
+               else
+                       WREG32_P(mmVCE_VCPU_CNTL, 0,
+                               ~VCE_VCPU_CNTL__CLK_EN_MASK);
+               /* hold on ECPU */
+               WREG32_P(mmVCE_SOFT_RESET,
+                        VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+               /* clear BUSY flag */
+               WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+
+               /* Set Clock-Gating off */
+               if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
+                       vce_v3_0_set_vce_sw_clock_gating(adev, false);
+       }
+
+       WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+       mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
 }
@@ -441,7 +485,14 @@ static int vce_v3_0_hw_init(void *handle)
 
 static int vce_v3_0_hw_fini(void *handle)
 {
-       return 0;
+       int r;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       r = vce_v3_0_wait_for_idle(handle);
+       if (r)
+               return r;
+
+       return vce_v3_0_stop(adev);
 }
 
 static int vce_v3_0_suspend(void *handle)
@@ -604,6 +655,18 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
        return 0;
 }
 
+static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+       u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+       if (enable)
+               tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+       else
+               tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+
+       WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
 static int vce_v3_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
 {
@@ -611,6 +674,9 @@ static int vce_v3_0_set_clockgating_state(void *handle,
        bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
        int i;
 
+       if (adev->asic_type == CHIP_POLARIS10)
+               vce_v3_set_bypass_mode(adev, enable);
+
        if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
                return 0;
 
@@ -701,6 +767,8 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
        .test_ib = amdgpu_vce_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .begin_use = amdgpu_vce_ring_begin_use,
+       .end_use = amdgpu_vce_ring_end_use,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
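
Editor's note: the large vce_v3_0 rework factors the open-coded status polling out of vce_v3_0_start() into vce_v3_0_firmware_loaded(): up to ten attempts, each polling the status register for about a second before kicking the ECPU through a soft reset, returning -ETIMEDOUT only after every retry fails. It also adds a matching vce_v3_0_stop() so hw_fini actually halts the block instead of returning 0 unconditionally, and programs both ring buffers before the per-instance loop. A standalone sketch of the retry-with-reset loop against a stubbed status register (the real code sleeps 10 ms between polls, elided here):

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    #define FW_LOADED_MASK 0x02

    static int polls;
    static uint32_t read_status(void)    /* stand-in for RREG32(mmVCE_STATUS) */
    {
            return ++polls > 150 ? FW_LOADED_MASK : 0;
    }
    static void reset_ecpu(void) { printf("soft-resetting ECPU\n"); }

    static int firmware_loaded(void)
    {
            for (int i = 0; i < 10; ++i) {
                    for (int j = 0; j < 100; ++j)   /* ~1s of polling per attempt */
                            if (read_status() & FW_LOADED_MASK)
                                    return 0;
                    reset_ecpu();                   /* nudge the firmware, retry */
            }
            return -ETIMEDOUT;
    }

    int main(void)
    {
            printf("firmware_loaded: %d after %d polls\n", firmware_loaded(), polls);
            return 0;
    }
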
index cda7def9dc2c1be7c84d1e4df2c0688065455ff2..03a31c53aec3d18e67b42cbadb4ab30f9e82c766 100644 (file)
@@ -1249,15 +1249,7 @@ static int vi_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS;
-               /* rev0 hardware doesn't support PG */
                adev->pg_flags = 0;
-               if (adev->rev_id != 0x00)
-                       adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
-                               AMD_PG_SUPPORT_GFX_SMG |
-                               AMD_PG_SUPPORT_GFX_DMG |
-                               AMD_PG_SUPPORT_CP |
-                               AMD_PG_SUPPORT_RLC_SMU_HS |
-                               AMD_PG_SUPPORT_GFX_PIPELINE;
                adev->external_rev_id = adev->rev_id + 0x1;
                break;
        case CHIP_STONEY:
@@ -1276,12 +1268,6 @@ static int vi_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS;
-               adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
-                       AMD_PG_SUPPORT_GFX_SMG |
-                       AMD_PG_SUPPORT_GFX_DMG |
-                       AMD_PG_SUPPORT_GFX_PIPELINE |
-                       AMD_PG_SUPPORT_CP |
-                       AMD_PG_SUPPORT_RLC_SMU_HS;
                adev->external_rev_id = adev->rev_id + 0x1;
                break;
        default:
index 293329719bba052f6b98edf2fc1b3f320ac609b7..809759f7bb8189ffbc68095a1583b947c42e6e2c 100644 (file)
@@ -27,6 +27,7 @@
 #define mmMM_INDEX                                                              0x0
 #define mmMM_INDEX_HI                                                           0x6
 #define mmMM_DATA                                                               0x1
+#define mmCC_BIF_BX_STRAP2                                                     0x152A
 #define mmBIF_MM_INDACCESS_CNTL                                                 0x1500
 #define mmBIF_DOORBELL_APER_EN                                                  0x1501
 #define mmBUS_CNTL                                                              0x1508
index 6f6fb34742d299a46d2b8d50764a43f732d9de83..ec69869c55ff530da2b00a1dae3833896ce07b5b 100644 (file)
 #define mmUVD_MIF_RECON1_ADDR_CONFIG                                            0x39c5
 #define ixUVD_MIF_SCLR_ADDR_CONFIG                                              0x4
 #define mmUVD_JPEG_ADDR_CONFIG                                                  0x3a1f
+#define mmUVD_GP_SCRATCH8                                                       0x3c0a
+#define mmUVD_GP_SCRATCH9                                                       0x3c0b
 #define mmUVD_GP_SCRATCH4                                                       0x3d38
 
 #endif /* UVD_6_0_D_H */
index 0c8c85d2a2a57f9a4ca11f52d0d7faaf5da7ef27..b86aba9d019fbe7bf00f22c798d0c8bd50cdcf29 100644 (file)
@@ -113,6 +113,8 @@ enum cgs_system_info_id {
        CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
        CGS_SYSTEM_INFO_PCIE_GEN_INFO,
        CGS_SYSTEM_INFO_PCIE_MLW,
+       CGS_SYSTEM_INFO_PCIE_DEV,
+       CGS_SYSTEM_INFO_PCIE_REV,
        CGS_SYSTEM_INFO_CG_FLAGS,
        CGS_SYSTEM_INFO_PG_FLAGS,
        CGS_SYSTEM_INFO_GFX_CU_INFO,
@@ -121,13 +123,13 @@ enum cgs_system_info_id {
 };
 
 struct cgs_system_info {
-       uint64_t       size;
-       uint64_t       info_id;
+       uint64_t                        size;
+       enum cgs_system_info_id         info_id;
        union {
-               void           *ptr;
-               uint64_t        value;
+               void                    *ptr;
+               uint64_t                value;
        };
-       uint64_t               padding[13];
+       uint64_t                        padding[13];
 };
 
 /*
@@ -160,6 +162,10 @@ struct cgs_firmware_info {
        uint16_t                feature_version;
        uint32_t                image_size;
        uint64_t                mc_addr;
+
+       /* only for smc firmware */
+       uint32_t                ucode_start_address;
+
        void                    *kptr;
 };
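
Editor's note: cgs_common.h tightens the query interface here: info_id becomes the proper enum, two PCIe fields are added, and the padding array keeps struct cgs_system_info at a fixed size. Callers set size to sizeof(struct cgs_system_info) before querying, the usual versioning trick that lets the callee reject or adapt to callers built against a different layout. A hedged sketch of that convention (names are illustrative, not the CGS API):

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    enum info_id { INFO_PCIE_GEN = 1, INFO_PCIE_MLW };

    struct sys_info {
            uint64_t     size;       /* caller sets sizeof(*this) for versioning */
            enum info_id info_id;
            uint64_t     value;
    };

    static int query(struct sys_info *info)
    {
            if (info->size != sizeof(*info))
                    return -EINVAL;      /* caller built against another layout */
            info->value = (info->info_id == INFO_PCIE_GEN) ? 3 : 16;
            return 0;
    }

    int main(void)
    {
            struct sys_info info = { .size = sizeof(info), .info_id = INFO_PCIE_GEN };

            if (!query(&info))
                    printf("pcie gen: %llu\n", (unsigned long long)info.value);
            return 0;
    }
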
 
index f9e03ad0baa29faea18d5bd0fc123c9f7b769c88..abbb658bdc1e9031f0f5e12bfbbce3a052f1cc58 100644 (file)
@@ -176,7 +176,7 @@ static int pp_hw_fini(void *handle)
 
 static bool pp_is_idle(void *handle)
 {
-       return 0;
+       return false;
 }
 
 static int pp_wait_for_idle(void *handle)
index d6635cc4b0fc046bcee3ebf5885dfd9b23229f6b..635fc4b4818484c1741d1404024452382c8a5fcc 100644 (file)
@@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[] = {
        system_config_tasks,
        setup_asic_tasks,
        enable_dynamic_state_management_tasks,
-       enable_clock_power_gatings_tasks,
        get_2d_performance_state_tasks,
        set_performance_state_tasks,
        initialize_thermal_controller_tasks,
@@ -140,7 +139,6 @@ static const pem_event_action * const resume_event[] = {
        setup_asic_tasks,
        enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
        enable_dynamic_state_management_tasks,
-       enable_clock_power_gatings_tasks,
        enable_disable_bapm_tasks,
        initialize_thermal_controller_tasks,
        get_2d_performance_state_tasks,
index 2da548f6337e822d3eeae9783d1b37190aa07d8c..2028980f1ed4f5be9694ce8fed4abb32e94fe014 100644 (file)
@@ -177,12 +177,12 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
                cz_dpm_powerdown_uvd(hwmgr);
        } else {
                cz_dpm_powerup_uvd(hwmgr);
-               cgs_set_clockgating_state(hwmgr->device,
-                                               AMD_IP_BLOCK_TYPE_UVD,
-                                               AMD_PG_STATE_GATE);
                cgs_set_powergating_state(hwmgr->device,
                                                AMD_IP_BLOCK_TYPE_UVD,
                                                AMD_CG_STATE_UNGATE);
+               cgs_set_clockgating_state(hwmgr->device,
+                                               AMD_IP_BLOCK_TYPE_UVD,
+                                               AMD_PG_STATE_GATE);
                cz_dpm_update_uvd_dpm(hwmgr, false);
        }
 
@@ -211,14 +211,14 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                        } else {
                                cz_dpm_powerup_vce(hwmgr);
                                cz_hwmgr->vce_power_gated = false;
-                               cgs_set_clockgating_state(
-                                                       hwmgr->device,
-                                                       AMD_IP_BLOCK_TYPE_VCE,
-                                                       AMD_PG_STATE_GATE);
                                cgs_set_powergating_state(
                                                        hwmgr->device,
                                                        AMD_IP_BLOCK_TYPE_VCE,
                                                        AMD_CG_STATE_UNGATE);
+                               cgs_set_clockgating_state(
+                                                       hwmgr->device,
+                                                       AMD_IP_BLOCK_TYPE_VCE,
+                                                       AMD_PG_STATE_GATE);
                                cz_dpm_update_vce_dpm(hwmgr);
                                cz_enable_disable_vce_dpm(hwmgr, true);
                                return 0;
index 9bf622e123b652e3e1d9fcd6676fafea1f041779..8cc0df9b534ac4dc0105f9e0418203e408a33755 100644 (file)
@@ -1167,9 +1167,9 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
        cz_ps->action = cz_current_ps->action;
 
-       if ((force_high == false) && (cz_ps->action == FORCE_HIGH))
+       if (!force_high && (cz_ps->action == FORCE_HIGH))
                cz_ps->action = CANCEL_FORCE_HIGH;
-       else if ((force_high == true) && (cz_ps->action != FORCE_HIGH))
+       else if (force_high && (cz_ps->action != FORCE_HIGH))
                cz_ps->action = FORCE_HIGH;
        else
                cz_ps->action = DO_NOTHING;
@@ -1656,7 +1656,7 @@ static void cz_hw_print_display_cfg(
        struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
        uint32_t data = 0;
 
-       if (hw_data->cc6_settings.cc6_setting_changed == true) {
+       if (hw_data->cc6_settings.cc6_setting_changed) {
 
                hw_data->cc6_settings.cc6_setting_changed = false;
 
index e1b649bd5344919b55786bea9fd5fd49c48e240b..5afe82068b29b3b8c1a8443056f2e2db4b4699d5 100644 (file)
@@ -56,7 +56,7 @@ int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
                fiji_update_uvd_dpm(hwmgr, false);
                cgs_set_clockgating_state(hwmgr->device,
                                          AMD_IP_BLOCK_TYPE_UVD,
-                                         AMD_PG_STATE_UNGATE);
+                                         AMD_CG_STATE_UNGATE);
        }
 
        return 0;
index 744aa886a2be789abc63c3c626dc2f07b3a39edf..120a9e2c31523d14b226e0b4d813274e61e6a940 100644 (file)
@@ -698,7 +698,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        if (0 == result) {
                struct cgs_system_info sys_info = {0};
 
-               data->is_tlu_enabled = 0;
+               data->is_tlu_enabled = false;
                hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
                                FIJI_MAX_HARDWARE_POWERLEVELS;
                hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
@@ -1450,7 +1450,7 @@ static void fiji_setup_pcie_table_entry(
 {
        dpm_table->dpm_levels[index].value = pcie_gen;
        dpm_table->dpm_levels[index].param1 = pcie_lanes;
-       dpm_table->dpm_levels[index].enabled = 1;
+       dpm_table->dpm_levels[index].enabled = true;
 }
 
 static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
@@ -1662,7 +1662,6 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
 {
        uint32_t count;
        uint8_t index;
-       int result = 0;
        struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1684,7 +1683,7 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
                                                VOLTAGE_SCALE)) / 25);
        }
 
-       return result;
+       return 0;
 }
 
 /**
@@ -4389,8 +4388,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
        if ((0 == data->sclk_dpm_key_disabled) &&
                (data->need_update_smu7_dpm_table &
                        (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-               PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
-                               "Trying to freeze SCLK DPM when DPM is disabled",);
+               PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+                                   "Trying to freeze SCLK DPM when DPM is disabled",
+                                   );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
                                PPSMC_MSG_SCLKDPM_FreezeLevel),
                                "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
@@ -4400,8 +4400,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
        if ((0 == data->mclk_dpm_key_disabled) &&
                (data->need_update_smu7_dpm_table &
                 DPMTABLE_OD_UPDATE_MCLK)) {
-               PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
-                               "Trying to freeze MCLK DPM when DPM is disabled",);
+               PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+                                   "Trying to freeze MCLK DPM when DPM is disabled",
+                                   );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
                                PPSMC_MSG_MCLKDPM_FreezeLevel),
                                "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
@@ -4571,7 +4572,6 @@ static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
 static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
                const struct fiji_power_state *fiji_ps)
 {
-       int result = 0;
        struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
        uint32_t high_limit_count;
 
@@ -4591,7 +4591,7 @@ static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
                        fiji_ps->performance_levels[0].memory_clock,
                        fiji_ps->performance_levels[high_limit_count].memory_clock);
 
-       return result;
+       return 0;
 }
 
 static int fiji_generate_dpm_level_enable_mask(
@@ -4850,8 +4850,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                (data->need_update_smu7_dpm_table &
                (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
 
-               PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
-                               "Trying to Unfreeze SCLK DPM when DPM is disabled",);
+               PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+                                   "Trying to Unfreeze SCLK DPM when DPM is disabled",
+                                   );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
                                PPSMC_MSG_SCLKDPM_UnfreezeLevel),
                        "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
@@ -4861,8 +4862,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
        if ((0 == data->mclk_dpm_key_disabled) &&
                (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
 
-               PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
-                               "Trying to Unfreeze MCLK DPM when DPM is disabled",);
+               PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+                                   "Trying to Unfreeze MCLK DPM when DPM is disabled",
+                                   );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
                                PPSMC_MSG_SCLKDPM_UnfreezeLevel),
                    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
index 7a705cee0cc23624b7bc131601460cd317e817c6..a6abe81bc8435598fbd2b39a05297ac7ed1b24b8 100644 (file)
@@ -59,8 +59,8 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
                       struct phm_runtime_table_header *rt_table,
                       void *input, void *output)
 {
-       int result = 0;
-       void *temp_storage = NULL;
+       int result;
+       void *temp_storage;
 
        if (hwmgr == NULL || rt_table == NULL) {
                printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
@@ -73,12 +73,13 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
                        printk(KERN_ERR "[ powerplay ] Could not allocate table temporary storage\n");
                        return -ENOMEM;
                }
+       } else {
+               temp_storage = NULL;
        }
 
        result = phm_run_table(hwmgr, rt_table, input, output, temp_storage);
 
-       if (NULL != temp_storage)
-               kfree(temp_storage);
+       kfree(temp_storage);
 
        return result;
 }
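
Editor's note: phm_dispatch_table() drops both the redundant initializers and the NULL check before kfree(). Like free() in standard C, kfree(NULL) is defined to be a no-op, so the cleanup path can call it unconditionally; the variables are also left uninitialized until each branch assigns them, letting the compiler flag any path that misses one. A tiny standalone illustration with free():

    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char *tmp = NULL;

            if (1 /* pretend the table needs temporary storage */) {
                    tmp = calloc(1, 64);
                    if (!tmp)
                            return -1;
                    strcpy(tmp, "scratch");
            }

            /* run_table(tmp) would go here */

            free(tmp);   /* safe even if tmp stayed NULL: free(NULL) is a no-op */
            return 0;
    }
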
index 03b6128ebc203c66ad26cf2f2cb5f15786e08820..27e07624ac28b5abc77c76a9ea2ad407c188ba11 100644 (file)
@@ -534,7 +534,7 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
 
        /* initialize vddc_dep_on_dal_pwrl table */
        table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
-       table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
+       table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
 
        if (NULL == table_clk_vlt) {
                printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
index aeec25c66aa83ed5600981b3456cad076d9a7166..b5edb51059860ece79b27b2ace1f566b584cb610 100644 (file)
@@ -116,7 +116,7 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
                polaris10_update_uvd_dpm(hwmgr, false);
                cgs_set_clockgating_state(hwmgr->device,
                                AMD_IP_BLOCK_TYPE_UVD,
-                               AMD_PG_STATE_UNGATE);
+                               AMD_CG_STATE_UNGATE);
        }
 
        return 0;
@@ -131,11 +131,19 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 
        data->vce_power_gated = bgate;
 
-       if (bgate)
+       if (bgate) {
+               cgs_set_clockgating_state(hwmgr->device,
+                               AMD_IP_BLOCK_TYPE_VCE,
+                               AMD_CG_STATE_GATE);
+               polaris10_update_vce_dpm(hwmgr, true);
                polaris10_phm_powerdown_vce(hwmgr);
-       else
+       } else {
                polaris10_phm_powerup_vce(hwmgr);
-
+               polaris10_update_vce_dpm(hwmgr, false);
+               cgs_set_clockgating_state(hwmgr->device,
+                               AMD_IP_BLOCK_TYPE_VCE,
+                               AMD_CG_STATE_UNGATE);
+       }
        return 0;
 }
 
index 9d764c4d253e3feeee4cd9ee921acf3c9805c0db..769636a0c5b5f38d92f29d94415d69c119bbf481 100644 (file)
@@ -780,7 +780,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
                        table->Smio[level] |=
                                data->mvdd_voltage_table.entries[level].smio_low;
                }
-               table->SmioMask2 = data->vddci_voltage_table.mask_low;
+               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
 
                table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
        }
@@ -1470,22 +1470,19 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
 
        table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
 
-       if (!data->sclk_dpm_key_disabled) {
-               /* Get MinVoltage and Frequency from DPM0,
-                * already converted to SMC_UL */
-               sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
-               result = polaris10_get_dependency_volt_by_clk(hwmgr,
-                               table_info->vdd_dep_on_sclk,
-                               table->ACPILevel.SclkFrequency,
-                               &table->ACPILevel.MinVoltage, &mvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Cannot find ACPI VDDC voltage value "
-                               "in Clock Dependency Table", );
-       } else {
-               sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
-               table->ACPILevel.MinVoltage =
-                               data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
-       }
+
+       /* Get MinVoltage and Frequency from DPM0,
+        * already converted to SMC_UL */
+       sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
+       result = polaris10_get_dependency_volt_by_clk(hwmgr,
+                       table_info->vdd_dep_on_sclk,
+                       sclk_frequency,
+                       &table->ACPILevel.MinVoltage, &mvdd);
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "Cannot find ACPI VDDC voltage value "
+                       "in Clock Dependency Table",
+                       );
+
 
        result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency,  &(table->ACPILevel.SclkSetting));
        PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
@@ -1510,24 +1507,17 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
        CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
        CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
 
-       if (!data->mclk_dpm_key_disabled) {
-               /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
-               table->MemoryACPILevel.MclkFrequency =
-                               data->dpm_table.mclk_table.dpm_levels[0].value;
-               result = polaris10_get_dependency_volt_by_clk(hwmgr,
-                               table_info->vdd_dep_on_mclk,
-                               table->MemoryACPILevel.MclkFrequency,
-                               &table->MemoryACPILevel.MinVoltage, &mvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Cannot find ACPI VDDCI voltage value "
-                               "in Clock Dependency Table",
-                               );
-       } else {
-               table->MemoryACPILevel.MclkFrequency =
-                               data->vbios_boot_state.mclk_bootup_value;
-               table->MemoryACPILevel.MinVoltage =
-                               data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
-       }
+
+       /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+       table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
+       result = polaris10_get_dependency_volt_by_clk(hwmgr,
+                       table_info->vdd_dep_on_mclk,
+                       table->MemoryACPILevel.MclkFrequency,
+                       &table->MemoryACPILevel.MinVoltage, &mvdd);
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "Cannot find ACPI VDDCI voltage value "
+                       "in Clock Dependency Table",
+                       );
 
        us_mvdd = 0;
        if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
@@ -1572,6 +1562,7 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
        struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
                        table_info->mm_dep_table;
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       uint32_t vddci;
 
        table->VceLevelCount = (uint8_t)(mm_table->count);
        table->VceBootLevel = 0;
@@ -1581,9 +1572,18 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
                table->VceLevel[count].MinVoltage = 0;
                table->VceLevel[count].MinVoltage |=
                                (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+               if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+               else
+                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+
                table->VceLevel[count].MinVoltage |=
-                               ((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
-                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+                               (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
                table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
 
                /*retrieve divider value for VBIOS */
@@ -1612,6 +1612,7 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
        struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
                        table_info->mm_dep_table;
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       uint32_t vddci;
 
        table->SamuBootLevel = 0;
        table->SamuLevelCount = (uint8_t)(mm_table->count);
@@ -1622,8 +1623,16 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
                table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
                table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
                                VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-                               data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+               if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+               else
+                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+               table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
                table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
 
                /* retrieve divider value for VBIOS */
@@ -1706,6 +1715,7 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
        struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
                        table_info->mm_dep_table;
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       uint32_t vddci;
 
        table->UvdLevelCount = (uint8_t)(mm_table->count);
        table->UvdBootLevel = 0;
@@ -1716,8 +1726,16 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
                table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
                table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
                                VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-                               data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+               if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+               else
+                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+               table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
                table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
 
                /* retrieve divider value for VBIOS */
@@ -1738,8 +1756,8 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
                CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
                CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
                CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
        }
+
        return result;
 }
 
@@ -1809,7 +1827,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 {
        uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+       uint8_t i, stretch_amount, volt_offset = 0;
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1839,26 +1857,29 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
        for (i = 0; i < sclk_table->count; i++) {
                data->smc_state_table.Sclk_CKS_masterEn0_7 |=
                                sclk_table->entries[i].cks_enable << i;
-
-               volt_without_cks =  (uint32_t)(((ro - 40) * 1000 - 2753594 - sclk_table->entries[i].clk/100 * 136418 /1000) / \
-                                       (sclk_table->entries[i].clk/100 * 1132925 /10000 - 242418)/100);
-
-               volt_with_cks = (uint32_t)((ro * 1000 -2396351 - sclk_table->entries[i].clk/100 * 329021/1000) / \
-                               (sclk_table->entries[i].clk/10000 * 649434 /1000  - 18005)/10);
+               if (hwmgr->chip_id == CHIP_POLARIS10) {
+                       volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
+                                               (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+                       volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+                                       (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+               } else {
+                       volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
+                                               (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+                       volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+                                       (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
+               }
 
                if (volt_without_cks >= volt_with_cks)
                        volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
-                                       sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+                                       sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
 
                data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
        }
 
+       data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
        /* Populate CKS Lookup Table */
-       if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-               stretch_amount2 = 0;
-       else if (stretch_amount == 3 || stretch_amount == 4)
-               stretch_amount2 = 1;
-       else {
+       if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 &&
+                       stretch_amount != 4 && stretch_amount != 5) {
                phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
                                PHM_PlatformCaps_ClockStretcher);
                PP_ASSERT_WITH_CODE(false,
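The rounding change in the hunk above replaces `x * 100 / 625 + 1`, which overshoots by one whenever x * 100 is an exact multiple of 625, with a standard integer ceiling division:

    /* ceil(a / b) in integer math, for a >= 0 and b > 0 */
    static inline uint32_t ceil_div_u32(uint32_t a, uint32_t b)
    {
            return (a + b - 1) / b;
    }

    /* e.g. a = 1250, b = 625: old form 1250/625 + 1 = 3,
     *                         new form (1250 + 624)/625 = 2 */

The 625-per-100 ratio suggests the offset is counted in 6.25 mV steps, though the units are not stated in this file.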
@@ -2629,6 +2650,8 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
 
+       smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+
        tmp_result = polaris10_enable_sclk_control(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable SCLK control!", result = tmp_result);
@@ -2645,6 +2668,10 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable deep sleep master switch!", result = tmp_result);
 
+       tmp_result = polaris10_enable_didt_config(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to enable DiDt config!", result = tmp_result);
+
        tmp_result = polaris10_start_dpm(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to start DPM!", result = tmp_result);
@@ -2784,13 +2811,13 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
                                        PHM_PlatformCaps_DynamicUVDState);
 
        /* power tune caps Assume disabled */
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                                PHM_PlatformCaps_SQRamping);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                                PHM_PlatformCaps_DBRamping);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                                PHM_PlatformCaps_TDRamping);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                                PHM_PlatformCaps_TCPRamping);
 
        if (hwmgr->powercontainment_enabled)
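With the four ramping caps now set by default, the DiDT programming added in this series arms all four throttling blocks (SQ, DB, TD, TCP) unless something clears them later. The phm_cap_set()/phm_cap_unset() pair just toggles one bit per PHM_PlatformCaps_* value; a sketch, assuming platformCaps is a plain bit array indexed by that enum:

    #include <stdint.h>

    static inline void example_cap_set(uint32_t *caps, unsigned int c)
    {
            caps[c / 32] |= 1u << (c % 32);
    }

    static inline void example_cap_unset(uint32_t *caps, unsigned int c)
    {
            caps[c / 32] &= ~(1u << (c % 32));
    }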
@@ -2847,7 +2874,7 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
        uint16_t vv_id;
-       uint16_t vddc = 0;
+       uint32_t vddc = 0;
        uint16_t i, j;
        uint32_t sclk = 0;
        struct phm_ppt_v1_information *table_info =
@@ -2878,8 +2905,9 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
                                continue;
                        }
 
-                       /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
-                       PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
+                       /* need to make sure vddc is less than 2 V, or else it could burn the ASIC.
+                        * vddc here is a real voltage level in units of 0.01 mV */
+                       PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
                                        "Invalid VDDC value", result = -EINVAL;);
 
                        /* the voltage should not be zero nor equal to leakage ID */
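The threshold grows from 2000 to 200000 because vddc is now reported in 0.01 mV units, per the updated comment:

    /* 2 V = 2000 mV = 2000 / 0.01 = 200000 in 0.01 mV units;
     * 200000 > UINT16_MAX (65535), hence the uint16_t -> uint32_t
     * change for vddc earlier in this hunk. */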
@@ -3105,6 +3133,31 @@ static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+{
+       struct phm_ppt_v1_information *table_info =
+                      (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+                       table_info->vdd_dep_on_mclk;
+       struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+                       table_info->vddc_lookup_table;
+       uint32_t i;
+
+       if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
+               if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
+                       return 0;
+
+               for (i = 0; i < lookup_table->count; i++) {
+                       if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
+                               dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
+                               return 0;
+                       }
+               }
+       }
+       return 0;
+}
+
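The workaround above scans the VDDC lookup table for the first real entry of at least 1000 (us_vdd appears to be in mV) and repoints the highest MCLK level at it; values of 0xff01 and above are treated as virtual/leakage voltage IDs rather than real voltages and skipped. The predicate, isolated:

    #include <stdint.h>

    /* mirrors the scan above; 1000 is assumed to be mV */
    static int example_is_usable_vddc(uint16_t us_vdd)
    {
            return us_vdd < 0xff01 && us_vdd >= 1000;
    }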
 int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data;
@@ -3188,6 +3241,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 
        polaris10_set_features_platform_caps(hwmgr);
 
+       polaris10_patch_voltage_workaround(hwmgr);
        polaris10_init_dpm_defaults(hwmgr);
 
        /* Get leakage voltage based on leakage ID. */
@@ -3207,7 +3261,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        if (0 == result) {
                struct cgs_system_info sys_info = {0};
 
-               data->is_tlu_enabled = 0;
+               data->is_tlu_enabled = false;
 
                hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
                                                        POLARIS10_MAX_HARDWARE_POWERLEVELS;
@@ -3590,6 +3644,7 @@ static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                    hwmgr->platform_descriptor.platformCaps,
                                    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
+
        disable_mclk_switching = (1 < info.display_count) ||
                                    disable_mclk_switching_for_frame_lock;
 
@@ -4094,8 +4149,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
        if ((0 == data->sclk_dpm_key_disabled) &&
                (data->need_update_smu7_dpm_table &
                        (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-               PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
-                               "Trying to freeze SCLK DPM when DPM is disabled",
+               PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
+                                   "Trying to freeze SCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
                                PPSMC_MSG_SCLKDPM_FreezeLevel),
@@ -4106,8 +4161,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
        if ((0 == data->mclk_dpm_key_disabled) &&
                (data->need_update_smu7_dpm_table &
                 DPMTABLE_OD_UPDATE_MCLK)) {
-               PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
-                               "Trying to freeze MCLK DPM when DPM is disabled",
+               PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
+                                   "Trying to freeze MCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
                                PPSMC_MSG_MCLKDPM_FreezeLevel),
@@ -4267,7 +4322,6 @@ static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
 static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
                const struct polaris10_power_state *polaris10_ps)
 {
-       int result = 0;
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
        uint32_t high_limit_count;
 
@@ -4287,7 +4341,7 @@ static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
                        polaris10_ps->performance_levels[0].memory_clock,
                        polaris10_ps->performance_levels[high_limit_count].memory_clock);
 
-       return result;
+       return 0;
 }
 
 static int polaris10_generate_dpm_level_enable_mask(
@@ -4370,25 +4424,20 @@ int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
        return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate);
 }
 
-static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
+int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
 {
-       const struct phm_set_power_state_input *states =
-                       (const struct phm_set_power_state_input *)input;
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       const struct polaris10_power_state *polaris10_nps =
-                       cast_const_phw_polaris10_power_state(states->pnew_state);
-       const struct polaris10_power_state *polaris10_cps =
-                       cast_const_phw_polaris10_power_state(states->pcurrent_state);
-
        uint32_t mm_boot_level_offset, mm_boot_level_value;
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
 
-       if (polaris10_nps->vce_clks.evclk > 0 &&
-       (polaris10_cps == NULL || polaris10_cps->vce_clks.evclk == 0)) {
-
-               data->smc_state_table.VceBootLevel =
+       if (!bgate) {
+               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                               PHM_PlatformCaps_StablePState))
+                       data->smc_state_table.VceBootLevel =
                                (uint8_t) (table_info->mm_dep_table->count - 1);
+               else
+                       data->smc_state_table.VceBootLevel = 0;
 
                mm_boot_level_offset = data->dpm_table_start +
                                offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
@@ -4401,18 +4450,14 @@ static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
                cgs_write_ind_register(hwmgr->device,
                                CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
 
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
+               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
                        smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
                                        PPSMC_MSG_VCEDPM_SetEnabledMask,
                                        (uint32_t)1 << data->smc_state_table.VceBootLevel);
-
-                       polaris10_enable_disable_vce_dpm(hwmgr, true);
-               } else if (polaris10_nps->vce_clks.evclk == 0 &&
-                               polaris10_cps != NULL &&
-                               polaris10_cps->vce_clks.evclk > 0)
-                       polaris10_enable_disable_vce_dpm(hwmgr, false);
        }
 
+       polaris10_enable_disable_vce_dpm(hwmgr, !bgate);
+
        return 0;
 }
 
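polaris10_update_vce_dpm() no longer diffs the current and new power states; the caller passes a gate flag, the boot level depends only on the StablePState cap, and VCE DPM is toggled to match. A hypothetical call pattern from a VCE power-gating handler:

    polaris10_update_vce_dpm(hwmgr, true);   /* gating VCE: skip boot-level setup, disable VCE DPM */
    polaris10_update_vce_dpm(hwmgr, false);  /* ungating: pick boot level, then enable VCE DPM */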
@@ -4497,8 +4542,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                (data->need_update_smu7_dpm_table &
                (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
 
-               PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
-                               "Trying to Unfreeze SCLK DPM when DPM is disabled",
+               PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
+                                   "Trying to Unfreeze SCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
                                PPSMC_MSG_SCLKDPM_UnfreezeLevel),
@@ -4509,8 +4554,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
        if ((0 == data->mclk_dpm_key_disabled) &&
                (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
 
-               PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
-                               "Trying to Unfreeze MCLK DPM when DPM is disabled",
+               PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
+                                   "Trying to Unfreeze MCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
                                PPSMC_MSG_MCLKDPM_UnfreezeLevel),
@@ -4557,6 +4602,17 @@ static int polaris10_notify_link_speed_change_after_state_change(
        return 0;
 }
 
+static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+               (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+       return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+}
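Rather than notifying the SMC on every display-count change, the driver now programs a VBI timeout (frame_time_x2, computed in polaris10_program_display_gap() below) and sends a HasDisplay handshake on each power-state transition. The helper's shape, with example_send() standing in for the smum_send_msg_to_smc*() wrappers:

    #include <stdint.h>

    extern int example_send(int msg, uint32_t param);    /* hypothetical wrapper */
    enum { EX_MSG_SET_VBI_TIMEOUT, EX_MSG_HAS_DISPLAY }; /* hypothetical message IDs */

    static int example_notify_display(uint32_t frame_time_x2)
    {
            example_send(EX_MSG_SET_VBI_TIMEOUT, frame_time_x2);
            return example_send(EX_MSG_HAS_DISPLAY, 0) == 0 ? 0 : -22; /* -EINVAL */
    }

Note the SetVBITimeout status is not checked; only the HasDisplay result maps to -EINVAL.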
+
 static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
 {
        int tmp_result, result = 0;
@@ -4590,11 +4646,6 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i
                        "Failed to generate DPM level enabled mask!",
                        result = tmp_result);
 
-       tmp_result = polaris10_update_vce_dpm(hwmgr, input);
-       PP_ASSERT_WITH_CODE((0 == tmp_result),
-                       "Failed to update VCE DPM!",
-                       result = tmp_result);
-
        tmp_result = polaris10_update_sclk_threshold(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to update SCLK threshold!",
@@ -4605,6 +4656,11 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i
                        "Failed to program memory timing parameters!",
                        result = tmp_result);
 
+       tmp_result = polaris10_notify_smc_display(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to notify smc display settings!",
+                       result = tmp_result);
+
        tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to unfreeze SCLK MCLK DPM!",
@@ -4639,6 +4695,7 @@ static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_
                        PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
 }
 
+
 int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
 {
        PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
@@ -4658,8 +4715,7 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm
 
        if (num_active_displays > 1)  /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
                polaris10_notify_smc_display_change(hwmgr, false);
-       else
-               polaris10_notify_smc_display_change(hwmgr, true);
 
        return 0;
 }
@@ -4700,6 +4756,8 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
        frame_time_in_us = 1000000 / refresh_rate;
 
        pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+       data->frame_time_x2 = frame_time_in_us * 2 / 100;
+
        display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
 
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
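frame_time_x2 is twice the frame time expressed in 100 µs units:

    /* worked example at a 60 Hz refresh rate:
     *   frame_time_in_us = 1000000 / 60      = 16666 us
     *   frame_time_x2    = 16666 * 2 / 100   = 333   (i.e. 33.3 ms in 100 us units)
     */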
@@ -4708,7 +4766,6 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
 
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
 
-       polaris10_notify_smc_display_change(hwmgr, num_active_displays != 0);
 
        return 0;
 }
@@ -4821,7 +4878,7 @@ int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
                return 0;
        }
 
-       data->need_long_memory_training = true;
+       data->need_long_memory_training = false;
 
 /*
  *     PPMCME_FirmwareDescriptorEntry *pfd = NULL;
index fd38b0d7a3c2b27fb02b515e44434ba41dd7eadc..33c33947e82756f879bbe88f037d6ca00d377e7f 100644 (file)
@@ -311,6 +311,7 @@ struct polaris10_hwmgr {
        bool apply_optimized_settings;
        uint32_t                              avfs_vdroop_override_setting;
        bool                                  apply_avfs_cks_off_voltage;
+       uint32_t                              frame_time_x2;
 };
 
 /* To convert to Q8.8 format for firmware */
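For the Q8.8 conversion mentioned in the comment above: a Q8.8 fixed-point value stores x as round(x * 256), giving 8 integer and 8 fractional bits:

    #include <stdint.h>

    static inline uint16_t example_to_q8_8(double x)
    {
            return (uint16_t)(x * 256.0 + 0.5);   /* e.g. 1.5 -> 0x0180 */
    }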
@@ -351,6 +352,6 @@ int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
 int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
 int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
 int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-
+int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate);
 #endif
 
index 5620e268b55307ecabd1e0bd9287ea37148a3f1b..b9cb240a135d24c5a79c88e5258d217e91a2316a 100644 (file)
 #include "polaris10_smumgr.h"
 #include "smu74_discrete.h"
 #include "pp_debug.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "oss/oss_3_0_sh_mask.h"
 
 #define VOLTAGE_SCALE  4
 #define POWERTUNE_DEFAULT_SET_MAX    1
 
+uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
+
+struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                Shift                                               Value       Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+       {   0xFFFFFFFF  }
+};
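Each config table is a flat list of register writes terminated by a 0xFFFFFFFF offset sentinel; per the column header, an entry applies (value << shift) & mask to the register at offset, through the access path named by type (indirect GC_CAC here, indirect DIDT below). A hypothetical walker, with example_read()/example_write() standing in for the cgs indirect accessors:

    #include <stdint.h>

    struct example_pt_config_reg {
            uint32_t offset, mask, shift, value, type;
    };

    extern uint32_t example_read(uint32_t offset, uint32_t type);
    extern void example_write(uint32_t offset, uint32_t type, uint32_t data);

    static void example_apply(const struct example_pt_config_reg *r)
    {
            for (; r->offset != 0xFFFFFFFF; r++) {
                    uint32_t data = example_read(r->offset, r->type);

                    data = (data & ~r->mask) | ((r->value << r->shift) & r->mask);
                    example_write(r->offset, r->type, data);
            }
    }

(The GC_CAC rows above use a full mask and shift 0, so they amount to plain writes of the listed values.)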
+
+struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                Shift                                               Value       Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND },
+       {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+       {   0xFFFFFFFF  }
+};
+
+struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                Shift                                               Value       Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT,                  0x0073,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT,                  0x00ab,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0084,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT,                  0x005a,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0067,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT,                  0x0084,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0027,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0046,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT,                 0x00aa,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MIN_POWER_MASK,                      DIDT_SQ_CTRL1__MIN_POWER__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                      DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                    0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__UNUSED_0_MASK,                    DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3853,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_0_MASK,                       DIDT_SQ_CTRL2__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x005a,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_1_MASK,                       DIDT_SQ_CTRL2__UNUSED_1__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_2_MASK,                       DIDT_SQ_CTRL2__UNUSED_2__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__UNUSED_0_MASK,                  DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x3853,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x3153,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,                   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__UNUSED_0_MASK,                       DIDT_SQ_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT,                  0x000a,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT,                  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0017,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT,                  0x002f,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0046,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT,                  0x005d,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                      DIDT_TD_CTRL1__MIN_POWER__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                      DIDT_TD_CTRL1__MAX_POWER__SHIFT,                    0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__UNUSED_0_MASK,                    DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0x00ff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3fff,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_0_MASK,                       DIDT_TD_CTRL2__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x000f,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_1_MASK,                       DIDT_TD_CTRL2__UNUSED_1__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_2_MASK,                       DIDT_TD_CTRL2__UNUSED_2__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__UNUSED_0_MASK,                  DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x0dde,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x0dde,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0009,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0009,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__UNUSED_0_MASK,                       DIDT_TD_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT,                 0x0004,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT,                 0x0037,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT,                 0x00ff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT,                 0x0054,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MIN_POWER_MASK,                     DIDT_TCP_CTRL1__MIN_POWER__SHIFT,                   0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MAX_POWER_MASK,                     DIDT_TCP_CTRL1__MAX_POWER__SHIFT,                   0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__UNUSED_0_MASK,                   DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK,              DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT,            0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK,               DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT,             0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_0_MASK,                      DIDT_TCP_CTRL2__UNUSED_0__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,      DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,    0x0032,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_1_MASK,                      DIDT_TCP_CTRL2__UNUSED_1__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,      DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_2_MASK,                      DIDT_TCP_CTRL2__UNUSED_2__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,   DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,  DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__UNUSED_0_MASK,                 DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,      DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,    0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,    0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK,                DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT,              0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK,                  DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT,                0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK,                 DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__PHASE_OFFSET_MASK,                  DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK,                 DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,          DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,        0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,    DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,    DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__UNUSED_0_MASK,                      DIDT_TCP_CTRL0__UNUSED_0__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   0xFFFFFFFF  }
+};
+
+struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                Shift                                               Value       Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT,                  0x0073,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT,                  0x00ab,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0084,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT,                  0x005a,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0067,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT,                  0x0084,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0027,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0046,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT,                 0x00aa,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MIN_POWER_MASK,                      DIDT_SQ_CTRL1__MIN_POWER__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                      DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                    0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__UNUSED_0_MASK,                    DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3853,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_0_MASK,                       DIDT_SQ_CTRL2__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x005a,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_1_MASK,                       DIDT_SQ_CTRL2__UNUSED_1__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_2_MASK,                       DIDT_SQ_CTRL2__UNUSED_2__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__UNUSED_0_MASK,                  DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x3853,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x3153,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,                   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__UNUSED_0_MASK,                       DIDT_SQ_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT,                  0x000a,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT,                  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0017,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT,                  0x002f,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0046,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT,                  0x005d,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                      DIDT_TD_CTRL1__MIN_POWER__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                      DIDT_TD_CTRL1__MAX_POWER__SHIFT,                    0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__UNUSED_0_MASK,                    DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0x00ff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3fff,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_0_MASK,                       DIDT_TD_CTRL2__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x000f,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_1_MASK,                       DIDT_TD_CTRL2__UNUSED_1__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_2_MASK,                       DIDT_TD_CTRL2__UNUSED_2__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__UNUSED_0_MASK,                  DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x0dde,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x0dde,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0008,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0008,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__UNUSED_0_MASK,                       DIDT_TD_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT,                 0x0004,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT,                 0x0037,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT,                 0x00ff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT,                 0x0054,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MIN_POWER_MASK,                     DIDT_TCP_CTRL1__MIN_POWER__SHIFT,                   0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MAX_POWER_MASK,                     DIDT_TCP_CTRL1__MAX_POWER__SHIFT,                   0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__UNUSED_0_MASK,                   DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK,              DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT,            0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK,               DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT,             0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_0_MASK,                      DIDT_TCP_CTRL2__UNUSED_0__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,      DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,    0x0032,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_1_MASK,                      DIDT_TCP_CTRL2__UNUSED_1__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,      DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_2_MASK,                      DIDT_TCP_CTRL2__UNUSED_2__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,   DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,  DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__UNUSED_0_MASK,                 DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,      DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,    0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,    0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK,                DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT,              0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK,                  DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT,                0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK,                 DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__PHASE_OFFSET_MASK,                  DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK,                 DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,          DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,        0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,    DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,    DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__UNUSED_0_MASK,                      DIDT_TCP_CTRL0__UNUSED_0__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+       {   0xFFFFFFFF  }
+};
+
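Each row of the table above encodes one field update: an indirect register offset, the field's mask and shift, the value to program, and a type selecting which register space the row goes through (POLARIS10_CONFIGREG_DIDT_IND routes through the DIDT indirect space). A minimal sketch of how a single DIDT row is applied, with the CGS accessors stubbed out for illustration:

    /* didt_read()/didt_write() are stand-ins for cgs_read_ind_register()/
     * cgs_write_ind_register() on CGS_IND_REG__DIDT; the fake register
     * file below exists only to make the sketch self-contained. */
    #include <stdint.h>

    static uint32_t didt_regs[0x100];
    static uint32_t didt_read(uint32_t off)            { return didt_regs[off]; }
    static void didt_write(uint32_t off, uint32_t val) { didt_regs[off] = val; }

    /* Apply one table row: read-modify-write of a single register field. */
    static void apply_didt_row(uint32_t offset, uint32_t mask,
                               uint32_t shift, uint32_t value)
    {
            uint32_t data = didt_read(offset);

            data &= ~mask;                      /* clear the target field */
            data |= (value << shift) & mask;    /* insert the new value   */
            didt_write(offset, data);
    }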
 static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
        /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
         * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
@@ -209,6 +559,184 @@ static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
+{
+       uint32_t en = enable ? 1 : 0;
+       int32_t result = 0;
+       uint32_t data;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
+               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
+               data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+               data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+               DIDTBlock_Info &= ~SQ_Enable_MASK;
+               DIDTBlock_Info |= en << SQ_Enable_SHIFT;
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
+               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
+               data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+               data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+               DIDTBlock_Info &= ~DB_Enable_MASK;
+               DIDTBlock_Info |= en << DB_Enable_SHIFT;
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
+               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
+               data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+               data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+               DIDTBlock_Info &= ~TD_Enable_MASK;
+               DIDTBlock_Info |= en << TD_Enable_SHIFT;
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
+               data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+               data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+               DIDTBlock_Info &= ~TCP_Enable_MASK;
+               DIDTBlock_Info |= en << TCP_Enable_SHIFT;
+       }
+
+       if (enable)
+               result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info);
+
+       return result;
+}
+
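The same field idiom, data = (data & ~MASK) | ((en << SHIFT) & MASK), is collapsed onto a single bit here. Assuming DIDT_CTRL_EN is bit 0 of each *_CTRL0 register (mask 0x1, shift 0), enabling turns a read-back of 0x00000ae0 into 0x00000ae1 and disabling clears it again; the trailing & MASK guarantees en cannot spill into neighbouring fields. DIDTBlock_Info, updated in step with each block, is evidently a file-scope summary word declared outside this hunk; it is handed to the SMC via PPSMC_MSG_Didt_Block_Function only on the enable path.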
+static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
+                               struct polaris10_pt_config_reg *cac_config_regs)
+{
+       struct polaris10_pt_config_reg *config_regs = cac_config_regs;
+       uint32_t cache = 0;
+       uint32_t data = 0;
+
+       PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL);
+
+       while (config_regs->offset != 0xFFFFFFFF) {
+               if (config_regs->type == POLARIS10_CONFIGREG_CACHE)
+                       cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+               else {
+                       switch (config_regs->type) {
+                       case POLARIS10_CONFIGREG_SMC_IND:
+                               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset);
+                               break;
+
+                       case POLARIS10_CONFIGREG_DIDT_IND:
+                               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
+                               break;
+
+                       case POLARIS10_CONFIGREG_GC_CAC_IND:
+                               data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
+                               break;
+
+                       default:
+                               data = cgs_read_register(hwmgr->device, config_regs->offset);
+                               break;
+                       }
+
+                       data &= ~config_regs->mask;
+                       data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+                       data |= cache;
+
+                       switch (config_regs->type) {
+                       case POLARIS10_CONFIGREG_SMC_IND:
+                               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data);
+                               break;
+
+                       case POLARIS10_CONFIGREG_DIDT_IND:
+                               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
+                               break;
+
+                       case POLARIS10_CONFIGREG_GC_CAC_IND:
+                               cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
+                               break;
+
+                       default:
+                               cgs_write_register(hwmgr->device, config_regs->offset, data);
+                               break;
+                       }
+                       cache = 0;
+               }
+
+               config_regs++;
+       }
+
+       return 0;
+}
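Rows typed POLARIS10_CONFIGREG_CACHE never touch hardware: their shifted values accumulate in cache and are OR'd into the next non-cache row's write, so several table rows can compose one register value. A hypothetical table fragment (offsets and values invented for illustration; the struct is the one declared in the header hunk below):

    static const struct polaris10_pt_config_reg example[] = {
    /*     offset  mask         shift  value   type                           */
        {  0x10,   0x000000ff,   0,    0x12,   POLARIS10_CONFIGREG_CACHE    },
        {  0x10,   0x0000ff00,   8,    0x34,   POLARIS10_CONFIGREG_CACHE    },
        {  0x10,   0x00ff0000,  16,    0x56,   POLARIS10_CONFIGREG_DIDT_IND },
        {  0xFFFFFFFF }    /* terminator */
    };

Walking this with polaris10_program_pt_config_registers() issues a single DIDT write of (old & ~0x00ff0000) | 0x00560000 | 0x3412; note the cached bits are OR'd on top of whatever the register already held in their fields, since only the non-cache row's mask is cleared first.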
+
+int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       uint32_t num_se = 0;
+       uint32_t count, value, value2;
+       struct cgs_system_info sys_info = {0};
+
+       sys_info.size = sizeof(struct cgs_system_info);
+       sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+       result = cgs_query_system_info(hwmgr->device, &sys_info);
+
+       if (result == 0)
+               num_se = sys_info.value;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+
+               /* TODO: disable clock gating before programming DIDT */
+               value = 0;
+               value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
+               for (count = 0; count < num_se; count++) {
+                       value = SYS_GRBM_GFX_INDEX_DATA__INSTANCE_BROADCAST_WRITES_MASK
+                               | SYS_GRBM_GFX_INDEX_DATA__SH_BROADCAST_WRITES_MASK
+                               | (count << SYS_GRBM_GFX_INDEX_DATA__SE_INDEX__SHIFT);
+                       cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value);
+
+                       if (hwmgr->chip_id == CHIP_POLARIS10) {
+                               result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
+                               PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+                               result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
+                               PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+                       } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+                               result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
+                               PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+                               result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
+                               PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+                       }
+               }
+               cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
+
+               result = polaris10_enable_didt(hwmgr, true);
+               PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
+
+               /* TODO: re-enable clock gating after enabling DIDT */
+       }
+
+       return 0;
+}
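The config tables are applied once per shader engine: each pass writes mmGRBM_GFX_INDEX with the SH- and instance-broadcast bits plus a single SE index, so the indirect writes land in that SE's copy of the DIDT registers, and the selector read into value2 beforehand is restored once the loop finishes. If the CGS_SYSTEM_INFO_GFX_SE_INFO query fails, num_se stays 0 and the programming loop never runs even though DIDT is still switched on afterwards. The selector for, say, SE #2 would look like this (field names from the hunk above, layout as the masks imply):

    uint32_t sel = SYS_GRBM_GFX_INDEX_DATA__INSTANCE_BROADCAST_WRITES_MASK |
                   SYS_GRBM_GFX_INDEX_DATA__SH_BROADCAST_WRITES_MASK |
                   (2 << SYS_GRBM_GFX_INDEX_DATA__SE_INDEX__SHIFT);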
+
+int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
+{
+       int result;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+               /* TODO: disable clock gating before disabling DIDT */
+
+               result = polaris10_enable_didt(hwmgr, false);
+               PP_ASSERT_WITH_CODE((result == 0), "DisableDiDt failed.", return result);
+               /* TODO: re-enable clock gating after disabling DIDT */
+       }
+
+       return 0;
+}
+
 static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
index d492d6d288679e6545408864b390baadd71329cc..bc78e28f010d757646f3d65b6d0e3bcde72b18e5 100644 (file)
@@ -27,6 +27,7 @@ enum polaris10_pt_config_reg_type {
        POLARIS10_CONFIGREG_MMR = 0,
        POLARIS10_CONFIGREG_SMC_IND,
        POLARIS10_CONFIGREG_DIDT_IND,
+       POLARIS10_CONFIGREG_GC_CAC_IND,
        POLARIS10_CONFIGREG_CACHE,
        POLARIS10_CONFIGREG_MAX
 };
@@ -49,6 +50,14 @@ enum polaris10_pt_config_reg_type {
 #define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
 #define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
 
+#define ixGC_CAC_CNTL 0x0000
+#define ixDIDT_SQ_STALL_CTRL 0x0004
+#define ixDIDT_SQ_TUNING_CTRL 0x0005
+#define ixDIDT_TD_STALL_CTRL 0x0044
+#define ixDIDT_TD_TUNING_CTRL 0x0045
+#define ixDIDT_TCP_STALL_CTRL 0x0064
+#define ixDIDT_TCP_TUNING_CTRL 0x0065
+
 struct polaris10_pt_config_reg {
        uint32_t                           offset;
        uint32_t                           mask;
@@ -80,6 +89,7 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
 int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr);
 int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
 int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
-
+int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr);
+int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr);
 #endif  /* POLARIS10_POWERTUNE_H */
 
index a3c38bbd1e94d5596ba9190772d40b1fcb5c28db..1944d289f84603f0c0eab21a478a0c5ace8835a7 100644 (file)
@@ -66,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
        int result;
        struct cgs_system_info info = {0};
 
-       if( 0 != acpi_atcs_notify_pcie_device_ready(device))
+       if (acpi_atcs_notify_pcie_device_ready(device))
                return -EINVAL;
 
        info.size = sizeof(struct cgs_system_info);
index 5d70e2c47faf9731c35c626392620331b233d5d2..26f3e30d0fefde6a77628b1429ea4f952cb900ba 100644 (file)
@@ -179,13 +179,12 @@ int atomctrl_set_engine_dram_timings_rv770(
 
        /* They are both in 10KHz Units. */
        engine_clock_parameters.ulTargetEngineClock =
-               (uint32_t) engine_clock & SET_CLOCK_FREQ_MASK;
-       engine_clock_parameters.ulTargetEngineClock |=
-               (COMPUTE_ENGINE_PLL_PARAM << 24);
+               cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) |
+                           ((COMPUTE_ENGINE_PLL_PARAM << 24)));
 
        /* in 10 khz units.*/
        engine_clock_parameters.sReserved.ulClock =
-               (uint32_t) memory_clock & SET_CLOCK_FREQ_MASK;
+               cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
        return cgs_atom_exec_cmd_table(hwmgr->device,
                        GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
                        &engine_clock_parameters);
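The remaining hunks are endianness fixes: ATOM BIOS command-table parameters are stored little-endian, so values must pass through cpu_to_le32()/cpu_to_le16() on the way in and le32_to_cpu()/le16_to_cpu() on the way out. On x86 these are no-ops; on a big-endian host the raw assignments this diff removes would have handed the ATOM interpreter byte-swapped clocks. A self-contained userspace sketch, with the conversion helpers reimplemented for illustration only:

    #include <stdint.h>

    /* Minimal stand-ins for the kernel's cpu_to_le32()/le32_to_cpu();
     * they byte-swap only when the host is big-endian. */
    static inline uint32_t swab32(uint32_t v)
    {
            return (v >> 24) | ((v >> 8) & 0xff00) |
                   ((v << 8) & 0xff0000) | (v << 24);
    }
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    # define cpu_to_le32(v) swab32(v)
    # define le32_to_cpu(v) swab32(v)
    #else
    # define cpu_to_le32(v) (v)
    # define le32_to_cpu(v) (v)
    #endif

    /* Hypothetical parameter block; ATOM expects little-endian fields. */
    struct atom_clock_params { uint32_t ulTargetEngineClock; };

    static void fill_params(struct atom_clock_params *p, uint32_t clk_10khz)
    {
            p->ulTargetEngineClock = cpu_to_le32(clk_10khz);  /* as the diff does */
    }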
@@ -252,7 +251,7 @@ int atomctrl_get_memory_pll_dividers_si(
        COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
        int result;
 
-       mpll_parameters.ulClock = (uint32_t) clock_value;
+       mpll_parameters.ulClock = cpu_to_le32(clock_value);
        mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);
 
        result = cgs_atom_exec_cmd_table
@@ -262,9 +261,9 @@ int atomctrl_get_memory_pll_dividers_si(
 
        if (0 == result) {
                mpll_param->mpll_fb_divider.clk_frac =
-                       mpll_parameters.ulFbDiv.usFbDivFrac;
+                       le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac);
                mpll_param->mpll_fb_divider.cl_kf =
-                       mpll_parameters.ulFbDiv.usFbDiv;
+                       le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv);
                mpll_param->mpll_post_divider =
                        (uint32_t)mpll_parameters.ucPostDiv;
                mpll_param->vco_mode =
@@ -300,7 +299,7 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
        COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
        int result;
 
-       mpll_parameters.ulClock.ulClock = (uint32_t)clock_value;
+       mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
 
        result = cgs_atom_exec_cmd_table(hwmgr->device,
                        GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
@@ -320,7 +319,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
        COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
        int result;
 
-       pll_parameters.ulClock = clock_value;
+       pll_parameters.ulClock = cpu_to_le32(clock_value);
 
        result = cgs_atom_exec_cmd_table
                (hwmgr->device,
@@ -329,7 +328,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
 
        if (0 == result) {
                dividers->pll_post_divider = pll_parameters.ucPostDiv;
-               dividers->real_clock = pll_parameters.ulClock;
+               dividers->real_clock = le32_to_cpu(pll_parameters.ulClock);
        }
 
        return result;
@@ -343,7 +342,7 @@ int atomctrl_get_engine_pll_dividers_vi(
        COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
        int result;
 
-       pll_patameters.ulClock.ulClock = clock_value;
+       pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
        pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
 
        result = cgs_atom_exec_cmd_table
@@ -355,12 +354,12 @@ int atomctrl_get_engine_pll_dividers_vi(
                dividers->pll_post_divider =
                        pll_patameters.ulClock.ucPostDiv;
                dividers->real_clock =
-                       pll_patameters.ulClock.ulClock;
+                       le32_to_cpu(pll_patameters.ulClock.ulClock);
 
                dividers->ul_fb_div.ul_fb_div_frac =
-                       pll_patameters.ulFbDiv.usFbDivFrac;
+                       le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
                dividers->ul_fb_div.ul_fb_div =
-                       pll_patameters.ulFbDiv.usFbDiv;
+                       le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
 
                dividers->uc_pll_ref_div =
                        pll_patameters.ucPllRefDiv;
@@ -380,7 +379,7 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
        COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters;
        int result;
 
-       pll_patameters.ulClock.ulClock = clock_value;
+       pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
        pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
 
        result = cgs_atom_exec_cmd_table
@@ -412,7 +411,7 @@ int atomctrl_get_dfs_pll_dividers_vi(
        COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
        int result;
 
-       pll_patameters.ulClock.ulClock = clock_value;
+       pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
        pll_patameters.ulClock.ucPostDiv =
                COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;
 
@@ -425,12 +424,12 @@ int atomctrl_get_dfs_pll_dividers_vi(
                dividers->pll_post_divider =
                        pll_patameters.ulClock.ucPostDiv;
                dividers->real_clock =
-                       pll_patameters.ulClock.ulClock;
+                       le32_to_cpu(pll_patameters.ulClock.ulClock);
 
                dividers->ul_fb_div.ul_fb_div_frac =
-                       pll_patameters.ulFbDiv.usFbDivFrac;
+                       le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
                dividers->ul_fb_div.ul_fb_div =
-                       pll_patameters.ulFbDiv.usFbDiv;
+                       le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
 
                dividers->uc_pll_ref_div =
                        pll_patameters.ucPllRefDiv;
@@ -519,13 +518,13 @@ int atomctrl_get_voltage_table_v3(
 
        for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) {
                voltage_table->entries[i].value =
-                       voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue;
+                       le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue);
                voltage_table->entries[i].smio_low =
-                       voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId;
+                       le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId);
        }
 
        voltage_table->mask_low    =
-               voltage_object->asGpioVoltageObj.ulGpioMaskVal;
+               le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal);
        voltage_table->count      =
                voltage_object->asGpioVoltageObj.ucGpioEntryNum;
        voltage_table->phase_delay =
@@ -592,12 +591,12 @@ bool atomctrl_get_pp_assign_pin(
                const uint32_t pinId,
                pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
 {
-       bool bRet = 0;
+       bool bRet = false;
        ATOM_GPIO_PIN_LUT *gpio_lookup_table =
                get_gpio_lookup_table(hwmgr->device);
 
        PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
-                       "Could not find GPIO lookup Table in BIOS.", return -1);
+                       "Could not find GPIO lookup Table in BIOS.", return false);
 
        bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
                gpio_pin_assignment);
@@ -650,8 +649,8 @@ int atomctrl_calculate_voltage_evv_on_sclk(
                return -1;
 
        if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
-                       (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
-                       getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
+           (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
+            getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
                return -1;
 
        /*-----------------------------------------------------------
@@ -662,37 +661,37 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
        switch (dpm_level) {
        case 1:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm1);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM1, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm1));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000);
                break;
        case 2:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm2);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM2, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm2));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000);
                break;
        case 3:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm3);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM3, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm3));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000);
                break;
        case 4:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm4);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM4, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm4));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000);
                break;
        case 5:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm5);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM5, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm5));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000);
                break;
        case 6:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm6);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM6, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm6));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000);
                break;
        case 7:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm7);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM7, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm7));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
                break;
        default:
                printk(KERN_ERR "DPM Level not supported\n");
                fPowerDPMx = Convert_ULONG_ToFraction(1);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM0, 1000);
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
        }
 
        /*-------------------------
@@ -716,9 +715,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
                return result;
 
        /* Finally, the actual fuse value */
-       ul_RO_fused = sOutput_FuseValues.ulEfuseValue;
-       fMin = GetScaledFraction(sRO_fuse.ulEfuseMin, 1);
-       fRange = GetScaledFraction(sRO_fuse.ulEfuseEncodeRange, 1);
+       ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1);
+       fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1);
        fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength);
 
        sCACm_fuse = getASICProfilingInfo->sCACm;
@@ -736,9 +735,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        if (result)
                return result;
 
-       ul_CACm_fused = sOutput_FuseValues.ulEfuseValue;
-       fMin = GetScaledFraction(sCACm_fuse.ulEfuseMin, 1000);
-       fRange = GetScaledFraction(sCACm_fuse.ulEfuseEncodeRange, 1000);
+       ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000);
+       fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000);
 
        fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength);
 
@@ -756,9 +755,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        if (result)
                return result;
 
-       ul_CACb_fused = sOutput_FuseValues.ulEfuseValue;
-       fMin = GetScaledFraction(sCACb_fuse.ulEfuseMin, 1000);
-       fRange = GetScaledFraction(sCACb_fuse.ulEfuseEncodeRange, 1000);
+       ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000);
+       fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000);
 
        fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength);
 
@@ -777,9 +776,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        if (result)
                return result;
 
-       ul_Kt_Beta_fused = sOutput_FuseValues.ulEfuseValue;
-       fAverage = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeAverage, 1000);
-       fRange = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeRange, 1000);
+       ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000);
+       fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000);
 
        fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused,
                        fAverage, fRange, sKt_Beta_fuse.ucEfuseLength);
@@ -798,9 +797,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        if (result)
                return result;
 
-       ul_Kv_m_fused = sOutput_FuseValues.ulEfuseValue;
-       fAverage = GetScaledFraction(sKv_m_fuse.ulEfuseEncodeAverage, 1000);
-       fRange = GetScaledFraction((sKv_m_fuse.ulEfuseEncodeRange & 0x7fffffff), 1000);
+       ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000);
+       fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000);
        fRange = fMultiply(fRange, ConvertToFraction(-1));
 
        fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused,
@@ -820,9 +819,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        if (result)
                return result;
 
-       ul_Kv_b_fused = sOutput_FuseValues.ulEfuseValue;
-       fAverage = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeAverage, 1000);
-       fRange = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeRange, 1000);
+       ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000);
+       fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000);
 
        fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused,
                        fAverage, fRange, sKv_b_fuse.ucEfuseLength);
@@ -851,9 +850,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        if (result)
                return result;
 
-       ul_FT_Lkg_V0NORM = sOutput_FuseValues.ulEfuseValue;
-       fLn_MaxDivMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin, 10000);
-       fMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeMin, 10000);
+       ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000);
+       fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000);
 
        fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM,
                        fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength);
@@ -863,40 +862,40 @@ int atomctrl_calculate_voltage_evv_on_sclk(
         * PART 2 - Grabbing all required values
         *-------------------------------------------
         */
-       fSM_A0 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A0, 1000000),
+       fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign)));
-       fSM_A1 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A1, 1000000),
+       fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign)));
-       fSM_A2 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A2, 100000),
+       fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign)));
-       fSM_A3 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A3, 1000000),
+       fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign)));
-       fSM_A4 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A4, 1000000),
+       fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign)));
-       fSM_A5 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A5, 1000),
+       fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign)));
-       fSM_A6 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A6, 1000),
+       fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign)));
-       fSM_A7 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A7, 1000),
+       fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign)));
 
-       fMargin_RO_a = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_a);
-       fMargin_RO_b = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_b);
-       fMargin_RO_c = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_c);
+       fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a));
+       fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b));
+       fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c));
 
-       fMargin_fixed = ConvertToFraction(getASICProfilingInfo->ulMargin_fixed);
+       fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed));
 
        fMargin_FMAX_mean = GetScaledFraction(
-                       getASICProfilingInfo->ulMargin_Fmax_mean, 10000);
+               le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000);
        fMargin_Plat_mean = GetScaledFraction(
-                       getASICProfilingInfo->ulMargin_plat_mean, 10000);
+               le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000);
        fMargin_FMAX_sigma = GetScaledFraction(
-                       getASICProfilingInfo->ulMargin_Fmax_sigma, 10000);
+               le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000);
        fMargin_Plat_sigma = GetScaledFraction(
-                       getASICProfilingInfo->ulMargin_plat_sigma, 10000);
+               le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000);
 
        fMargin_DC_sigma = GetScaledFraction(
-                       getASICProfilingInfo->ulMargin_DC_sigma, 100);
+               le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100);
        fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000));
 
        fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100));
@@ -908,14 +907,14 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        fSclk = GetScaledFraction(sclk, 100);
 
        fV_max = fDivide(GetScaledFraction(
-                       getASICProfilingInfo->ulMaxVddc, 1000), ConvertToFraction(4));
-       fT_prod = GetScaledFraction(getASICProfilingInfo->ulBoardCoreTemp, 10);
-       fLKG_Factor = GetScaledFraction(getASICProfilingInfo->ulEvvLkgFactor, 100);
-       fT_FT = GetScaledFraction(getASICProfilingInfo->ulLeakageTemp, 10);
+                                le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4));
+       fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10);
+       fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100);
+       fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10);
        fV_FT = fDivide(GetScaledFraction(
-                       getASICProfilingInfo->ulLeakageVoltage, 1000), ConvertToFraction(4));
+                               le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4));
        fV_min = fDivide(GetScaledFraction(
-                       getASICProfilingInfo->ulMinVddc, 1000), ConvertToFraction(4));
+                                le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4));
 
        /*-----------------------
         * PART 3
@@ -925,7 +924,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
        fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
        fC_Term = fAdd(fMargin_RO_c,
-                       fAdd(fMultiply(fSM_A0,fLkg_FT),
+                       fAdd(fMultiply(fSM_A0, fLkg_FT),
                        fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
                        fAdd(fMultiply(fSM_A3, fSclk),
                        fSubtract(fSM_A7, fRO_fused)))));
@@ -1063,9 +1062,55 @@ int atomctrl_get_voltage_evv_on_sclk(
        get_voltage_info_param_space.ucVoltageMode   =
                ATOM_GET_VOLTAGE_EVV_VOLTAGE;
        get_voltage_info_param_space.usVoltageLevel  =
-               virtual_voltage_Id;
+               cpu_to_le16(virtual_voltage_Id);
        get_voltage_info_param_space.ulSCLKFreq      =
-               sclk;
+               cpu_to_le32(sclk);
+
+       result = cgs_atom_exec_cmd_table(hwmgr->device,
+                       GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
+                       &get_voltage_info_param_space);
+
+       if (0 != result)
+               return result;
+
+       *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
+                               (&get_voltage_info_param_space))->usVoltageLevel);
+
+       return result;
+}
+
+/**
+ * atomctrl_get_voltage_evv - get the voltage via a call to the ATOM COMMAND table.
+ * @param hwmgr        input: pointer to hwManager
+ * @param virtual_voltage_id      input: voltage id which matches the per-voltage DPM state: 0xff01, 0xff02.. 0xff08
+ * @param voltage                     output: real voltage level in units of mV
+ */
+int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
+                            uint16_t virtual_voltage_id,
+                            uint16_t *voltage)
+{
+       int result;
+       int entry_id;
+       GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
+
+       /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
+       for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
+               if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) {
+                       /* found */
+                       break;
+               }
+       }
+
+       PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+               "Can't find requested voltage id in vddc_dependency_on_sclk table!",
+               return -EINVAL;
+       );
+
+       get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
+       get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+       get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
+       get_voltage_info_param_space.ulSCLKFreq =
+               cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);
 
        result = cgs_atom_exec_cmd_table(hwmgr->device,
                        GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
@@ -1074,8 +1119,8 @@ int atomctrl_get_voltage_evv_on_sclk(
        if (0 != result)
                return result;
 
-       *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
-                       (&get_voltage_info_param_space))->usVoltageLevel;
+       *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
+                               (&get_voltage_info_param_space))->usVoltageLevel);
 
        return result;
 }
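A hedged caller sketch for the new helper: walk the eight leakage IDs the comment above names and record each real voltage. record_leakage() and the loop itself are illustrative, not part of the patch:

    uint16_t vddc;
    uint16_t id;

    for (id = 0xff01; id <= 0xff08; id++)
            if (atomctrl_get_voltage_evv(hwmgr, id, &vddc) == 0)
                    record_leakage(id, vddc);   /* hypothetical consumer */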
@@ -1165,8 +1210,8 @@ static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
 
        if (entry_found) {
                ssEntry->speed_spectrum_percentage =
-                       ssInfo->usSpreadSpectrumPercentage;
-               ssEntry->speed_spectrum_rate = ssInfo->usSpreadRateInKhz;
+                       le16_to_cpu(ssInfo->usSpreadSpectrumPercentage);
+               ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz);
 
                if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
                        (GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
@@ -1222,7 +1267,7 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
        int result;
        READ_EFUSE_VALUE_PARAMETER efuse_param;
 
-       efuse_param.sEfuse.usEfuseIndex = (start_index / 32) * 4;
+       efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4);
        efuse_param.sEfuse.ucBitShift = (uint8_t)
                        (start_index - ((start_index / 32) * 32));
        efuse_param.sEfuse.ucBitLength  = (uint8_t)
@@ -1232,19 +1277,21 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
                        GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
                        &efuse_param);
        if (!result)
-               *efuse = efuse_param.ulEfuseValue & mask;
+               *efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask;
 
        return result;
 }
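The efuse index math converts a bit index into a dword-aligned byte offset plus a shift within that dword. A worked example:

    /* start_index = 69:
     *   usEfuseIndex = (69 / 32) * 4 = 8        -> byte offset of the 3rd dword
     *   ucBitShift   = 69 - (69 / 32) * 32 = 5
     * so bit 69 of the fuse block is read as bit 5 of the dword at offset 8. */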
 
 int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
-                                                               uint8_t level)
+                             uint8_t level)
 {
        DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
        int result;
 
-       memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = memory_clock & SET_CLOCK_FREQ_MASK;
-       memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM;
+       memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq =
+               memory_clock & SET_CLOCK_FREQ_MASK;
+       memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag =
+               ADJUST_MC_SETTING_PARAM;
        memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
 
        result = cgs_atom_exec_cmd_table
@@ -1256,7 +1303,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
 }
 
 int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
-                               uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage)
+                               uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
 {
 
        int result;
@@ -1264,8 +1311,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
 
        get_voltage_info_param_space.ucVoltageType = voltage_type;
        get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
-       get_voltage_info_param_space.usVoltageLevel = virtual_voltage_Id;
-       get_voltage_info_param_space.ulSCLKFreq = sclk;
+       get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
+       get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk);
 
        result = cgs_atom_exec_cmd_table(hwmgr->device,
                        GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
@@ -1274,7 +1321,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
        if (0 != result)
                return result;
 
-       *voltage = get_voltage_info_param_space.usVoltageLevel;
+       *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
 
        return result;
 }
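
[Annotation: the _ai variant now pulls its result through the V1_3 output view of the same parameter space, whose ulVoltageLevel field is 32 bits wide; that is why this hunk and the matching header hunk further down widen the out-parameter from uint16_t * to uint32_t *. A compilable sketch of the driver's in-place input/output idiom, using hypothetical stand-in types rather than the real atombios structures:

    #include <stdint.h>
    #include <string.h>

    struct cmd_input  { uint16_t level; uint32_t freq; };
    struct cmd_output { uint32_t level; };

    static void exec_cmd(void *param)           /* stand-in for the command table */
    {
            struct cmd_output out = { .level = 900 };
            memcpy(param, &out, sizeof(out));   /* firmware overwrites the buffer */
    }

    static uint32_t query_level(uint16_t id, uint32_t freq)
    {
            struct cmd_input space = { .level = id, .freq = freq };

            exec_cmd(&space);                            /* input consumed...   */
            return ((struct cmd_output *)&space)->level; /* ...output read back */
    }
]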
@@ -1295,15 +1342,19 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
        for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
                table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
                table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
-               table->entry[i].usFcw_pcc = psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc;
-               table->entry[i].usFcw_trans_upper = psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper;
-               table->entry[i].usRcw_trans_lower = psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower;
+               table->entry[i].usFcw_pcc =
+                       le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
+               table->entry[i].usFcw_trans_upper =
+                       le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
+               table->entry[i].usRcw_trans_lower =
+                       le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
        }
 
        return 0;
 }
 
-int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param)
+int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
+                                 struct pp_atom_ctrl__avfs_parameters *param)
 {
        ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
 
@@ -1317,30 +1368,30 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__a
        if (!profile)
                return -1;
 
-       param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0;
-       param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1;
-       param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2;
-       param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma;
-       param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean;
-       param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma;
-       param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0;
-       param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1;
-       param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2;
-       param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0;
-       param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1;
-       param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2;
-       param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
-       param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2;
-       param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b;
-       param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1;
-       param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2;
-       param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b;
-       param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv;
+       param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0);
+       param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1);
+       param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2);
+       param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma);
+       param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean);
+       param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma);
+       param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0);
+       param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1);
+       param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2);
+       param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0);
+       param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1);
+       param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2);
+       param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+       param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+       param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+       param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1);
+       param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2);
+       param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b);
+       param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv);
        param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
        param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
        param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
        param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
-       param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor;
+       param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor);
        param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
 
        return 0;
index 248c5db5f38025559d345d311f1cfe3d7e1eb1ad..fc898afce002fe1c681effcb5ef603de854c2745 100644
@@ -281,6 +281,7 @@ struct pp_atom_ctrl__avfs_parameters {
 
 extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
 extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+extern int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t *voltage);
 extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
 extern int atomctrl_get_memory_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, pp_atomctrl_internal_ss_info *ssInfo);
 extern int atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t engine_clock, pp_atomctrl_internal_ss_info *ssInfo);
@@ -305,7 +306,7 @@ extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t
 extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
                                                                uint8_t level);
 extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
-                               uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+                               uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage);
 extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
 
 extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
index 009bd5963ed8c8d9c79472e6dff4c237e4caac36..8f50a038396ce0966c1690c22dd466129a533812 100644
@@ -50,55 +50,45 @@ typedef union _fInt {
  * Function Declarations
  *  -------------------------------------------------------------------------------
  */
-fInt ConvertToFraction(int);                       /* Use this to convert an INT to a FINT */
-fInt Convert_ULONG_ToFraction(uint32_t);              /* Use this to convert an uint32_t to a FINT */
-fInt GetScaledFraction(int, int);                  /* Use this to convert an INT to a FINT after scaling it by a factor */
-int ConvertBackToInteger(fInt);                    /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */
-
-fInt fNegate(fInt);                                /* Returns -1 * input fInt value */
-fInt fAdd (fInt, fInt);                            /* Returns the sum of two fInt numbers */
-fInt fSubtract (fInt A, fInt B);                   /* Returns A-B - Sometimes easier than Adding negative numbers */
-fInt fMultiply (fInt, fInt);                       /* Returns the product of two fInt numbers */
-fInt fDivide (fInt A, fInt B);                     /* Returns A/B */
-fInt fGetSquare(fInt);                             /* Returns the square of a fInt number */
-fInt fSqrt(fInt);                                  /* Returns the Square Root of a fInt number */
-
-int uAbs(int);                                     /* Returns the Absolute value of the Int */
-fInt fAbs(fInt);                                   /* Returns the Absolute value of the fInt */
-int uPow(int base, int exponent);                  /* Returns base^exponent an INT */
-
-void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */
-bool Equal(fInt, fInt);                         /* Returns true if two fInts are equal to each other */
-bool GreaterThan(fInt A, fInt B);               /* Returns true if A > B */
-
-fInt fExponential(fInt exponent);                  /* Can be used to calculate e^exponent */
-fInt fNaturalLog(fInt value);                      /* Can be used to calculate ln(value) */
+static fInt ConvertToFraction(int);                       /* Use this to convert an INT to a FINT */
+static fInt Convert_ULONG_ToFraction(uint32_t);           /* Use this to convert an uint32_t to a FINT */
+static fInt GetScaledFraction(int, int);                  /* Use this to convert an INT to a FINT after scaling it by a factor */
+static int ConvertBackToInteger(fInt);                    /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */
+
+static fInt fNegate(fInt);                                /* Returns -1 * input fInt value */
+static fInt fAdd (fInt, fInt);                            /* Returns the sum of two fInt numbers */
+static fInt fSubtract (fInt A, fInt B);                   /* Returns A-B - Sometimes easier than Adding negative numbers */
+static fInt fMultiply (fInt, fInt);                       /* Returns the product of two fInt numbers */
+static fInt fDivide (fInt A, fInt B);                     /* Returns A/B */
+static fInt fGetSquare(fInt);                             /* Returns the square of a fInt number */
+static fInt fSqrt(fInt);                                  /* Returns the Square Root of a fInt number */
+
+static int uAbs(int);                                     /* Returns the Absolute value of the Int */
+static int uPow(int base, int exponent);                  /* Returns base^exponent an INT */
+
+static void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */
+static bool Equal(fInt, fInt);                            /* Returns true if two fInts are equal to each other */
+static bool GreaterThan(fInt A, fInt B);                  /* Returns true if A > B */
+
+static fInt fExponential(fInt exponent);                  /* Can be used to calculate e^exponent */
+static fInt fNaturalLog(fInt value);                      /* Can be used to calculate ln(value) */
 
 /* Fuse decoding functions
  * -------------------------------------------------------------------------------------
  */
-fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength);
-fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength);
-fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength);
+static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength);
+static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength);
+static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength);
 
 /* Internal Support Functions - Use these ONLY for testing or adding to internal functions
  * -------------------------------------------------------------------------------------
  * Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons.
  */
-fInt Add (int, int);                               /* Add two INTs and return Sum as FINT */
-fInt Multiply (int, int);                          /* Multiply two INTs and return Product as FINT */
-fInt Divide (int, int);                            /* You get the idea... */
-fInt fNegate(fInt);
+static fInt Divide (int, int);                            /* Divide two INTs and return result as FINT */
+static fInt fNegate(fInt);
 
-int uGetScaledDecimal (fInt);                      /* Internal function */
-int GetReal (fInt A);                              /* Internal function */
-
-/* Future Additions and Incomplete Functions
- * -------------------------------------------------------------------------------------
- */
-int GetRoundedValue(fInt);                         /* Incomplete function - Useful only when Precision is lacking */
-                                                   /* Let us say we have 2.126 but can only handle 2 decimal points. We could */
-                                                   /* either chop of 6 and keep 2.12 or use this function to get 2.13, which is more accurate */
+static int uGetScaledDecimal (fInt);                      /* Internal function */
+static int GetReal (fInt A);                              /* Internal function */
 
 /* -------------------------------------------------------------------------------------
  * TROUBLESHOOTING INFORMATION
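
[Annotation: this sweep gives every helper internal linkage. Judging by the layout (declarations followed by the definitions in the same file), this fixed-point library is a header; if more than one .c file includes it, each non-static definition becomes a duplicate global symbol and the link fails, so static makes each includer's copy private. The unused helpers Add(), Multiply(), fAbs() and the never-finished GetRoundedValue() are dropped outright further down. Sketch of the fix, with a hypothetical file name:

    /* mathlib.h -- hypothetical header that defines its functions in place */

    static int helper(void)
    {
            /*
             * `static` gives the definition internal linkage, so every .c
             * file including this header gets its own private copy.
             * Without it, linking two such objects fails with
             * "multiple definition of `helper'".
             */
            return 42;
    }
]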
@@ -115,7 +105,7 @@ int GetRoundedValue(fInt);                         /* Incomplete function - Usef
  * START OF CODE
  * -------------------------------------------------------------------------------------
  */
-fInt fExponential(fInt exponent)        /*Can be used to calculate e^exponent*/
+static fInt fExponential(fInt exponent)        /*Can be used to calculate e^exponent*/
 {
        uint32_t i;
        bool bNegated = false;
@@ -154,7 +144,7 @@ fInt fExponential(fInt exponent)        /*Can be used to calculate e^exponent*/
        return solution;
 }
 
-fInt fNaturalLog(fInt value)
+static fInt fNaturalLog(fInt value)
 {
        uint32_t i;
        fInt upper_bound = Divide(8, 1000);
@@ -179,7 +169,7 @@ fInt fNaturalLog(fInt value)
        return (fAdd(solution, error_term));
 }
 
-fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength)
+static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength)
 {
        fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
        fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
@@ -194,7 +184,7 @@ fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t b
 }
 
 
-fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength)
+static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength)
 {
        fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
        fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
@@ -212,7 +202,7 @@ fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint
        return f_decoded_value;
 }
 
-fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength)
+static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength)
 {
        fInt fLeakage;
        fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
@@ -225,7 +215,7 @@ fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min,
        return fLeakage;
 }
 
-fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */
+static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */
 {
        fInt temp;
 
@@ -237,13 +227,13 @@ fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to m
        return temp;
 }
 
-fInt fNegate(fInt X)
+static fInt fNegate(fInt X)
 {
        fInt CONSTANT_NEGONE = ConvertToFraction(-1);
        return (fMultiply(X, CONSTANT_NEGONE));
 }
 
-fInt Convert_ULONG_ToFraction(uint32_t X)
+static fInt Convert_ULONG_ToFraction(uint32_t X)
 {
        fInt temp;
 
@@ -255,7 +245,7 @@ fInt Convert_ULONG_ToFraction(uint32_t X)
        return temp;
 }
 
-fInt GetScaledFraction(int X, int factor)
+static fInt GetScaledFraction(int X, int factor)
 {
        int times_shifted, factor_shifted;
        bool bNEGATED;
@@ -304,7 +294,7 @@ fInt GetScaledFraction(int X, int factor)
 }
 
 /* Addition using two fInts */
-fInt fAdd (fInt X, fInt Y)
+static fInt fAdd (fInt X, fInt Y)
 {
        fInt Sum;
 
@@ -314,7 +304,7 @@ fInt fAdd (fInt X, fInt Y)
 }
 
 /* Subtraction using two fInts */
-fInt fSubtract (fInt X, fInt Y)
+static fInt fSubtract (fInt X, fInt Y)
 {
        fInt Difference;
 
@@ -323,7 +313,7 @@ fInt fSubtract (fInt X, fInt Y)
        return Difference;
 }
 
-bool Equal(fInt A, fInt B)
+static bool Equal(fInt A, fInt B)
 {
        if (A.full == B.full)
                return true;
@@ -331,7 +321,7 @@ bool Equal(fInt A, fInt B)
                return false;
 }
 
-bool GreaterThan(fInt A, fInt B)
+static bool GreaterThan(fInt A, fInt B)
 {
        if (A.full > B.full)
                return true;
@@ -339,7 +329,7 @@ bool GreaterThan(fInt A, fInt B)
                return false;
 }
 
-fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
+static fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
 {
        fInt Product;
        int64_t tempProduct;
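
[Annotation: the 64-bit temporary in fMultiply() is the standard fixed-point idiom: the raw product of two fixed-point values carries twice the fractional bits and can overflow 32 bits, so it is computed wide and then shifted back down. A minimal sketch, assuming SHIFT_AMOUNT is the fractional bit count defined earlier in this file (its definition is outside these hunks):

    #include <stdint.h>

    #define SHIFT_AMOUNT 16                         /* assumed fractional bits */

    static int32_t fx_mul(int32_t x, int32_t y)
    {
            int64_t wide = (int64_t)x * y;          /* full 64-bit product  */
            return (int32_t)(wide >> SHIFT_AMOUNT); /* rescale back to Qm.n */
    }
]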
@@ -363,7 +353,7 @@ fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
        return Product;
 }
 
-fInt fDivide (fInt X, fInt Y)
+static fInt fDivide (fInt X, fInt Y)
 {
        fInt fZERO, fQuotient;
        int64_t longlongX, longlongY;
@@ -384,7 +374,7 @@ fInt fDivide (fInt X, fInt Y)
        return fQuotient;
 }
 
-int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/
+static int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/
 {
        fInt fullNumber, scaledDecimal, scaledReal;
 
@@ -397,13 +387,13 @@ int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to ch
        return fullNumber.full;
 }
 
-fInt fGetSquare(fInt A)
+static fInt fGetSquare(fInt A)
 {
        return fMultiply(A,A);
 }
 
 /* x_new = x_old - (x_old^2 - C) / (2 * x_old) */
-fInt fSqrt(fInt num)
+static fInt fSqrt(fInt num)
 {
        fInt F_divide_Fprime, Fprime;
        fInt test;
@@ -460,7 +450,7 @@ fInt fSqrt(fInt num)
        return (x_new);
 }
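
[Annotation: the update rule quoted above fSqrt() is Newton's method applied to f(x) = x^2 - C:

    x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)}
            = x_n - \frac{x_n^2 - C}{2 x_n}
            = \frac{1}{2}\left(x_n + \frac{C}{x_n}\right)

which converges quadratically to \sqrt{C} from any positive starting guess.]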
 
-void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
+static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
 {
        fInt *pRoots = &Roots[0];
        fInt temp, root_first, root_second;
@@ -498,52 +488,13 @@ void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
  * -----------------------------------------------------------------------------
  */
 
-/* Addition using two normal ints - Temporary - Use only for testing purposes?. */
-fInt Add (int X, int Y)
-{
-       fInt A, B, Sum;
-
-       A.full = (X << SHIFT_AMOUNT);
-       B.full = (Y << SHIFT_AMOUNT);
-
-       Sum.full = A.full + B.full;
-
-       return Sum;
-}
-
 /* Conversion Functions */
-int GetReal (fInt A)
+static int GetReal (fInt A)
 {
        return (A.full >> SHIFT_AMOUNT);
 }
 
-/* Temporarily Disabled */
-int GetRoundedValue(fInt A) /*For now, round the 3rd decimal place */
-{
-       /* ROUNDING TEMPORARLY DISABLED
-       int temp = A.full;
-       int decimal_cutoff, decimal_mask = 0x000001FF;
-       decimal_cutoff = temp & decimal_mask;
-       if (decimal_cutoff > 0x147) {
-               temp += 673;
-       }*/
-
-       return ConvertBackToInteger(A)/10000; /*Temporary - in case this was used somewhere else */
-}
-
-fInt Multiply (int X, int Y)
-{
-       fInt A, B, Product;
-
-       A.full = X << SHIFT_AMOUNT;
-       B.full = Y << SHIFT_AMOUNT;
-
-       Product = fMultiply(A, B);
-
-       return Product;
-}
-
-fInt Divide (int X, int Y)
+static fInt Divide (int X, int Y)
 {
        fInt A, B, Quotient;
 
@@ -555,7 +506,7 @@ fInt Divide (int X, int Y)
        return Quotient;
 }
 
-int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */
+static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */
 {
        int dec[PRECISION];
        int i, scaledDecimal = 0, tmp = A.partial.decimal;
@@ -570,7 +521,7 @@ int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole intege
        return scaledDecimal;
 }
 
-int uPow(int base, int power)
+static int uPow(int base, int power)
 {
        if (power == 0)
                return 1;
@@ -578,15 +529,7 @@ int uPow(int base, int power)
                return (base)*uPow(base, power - 1);
 }
 
-fInt fAbs(fInt A)
-{
-       if (A.partial.real < 0)
-               return (fMultiply(A, ConvertToFraction(-1)));
-       else
-               return A;
-}
-
-int uAbs(int X)
+static int uAbs(int X)
 {
        if (X < 0)
                return (X * -1);
@@ -594,7 +537,7 @@ int uAbs(int X)
                return X;
 }
 
-fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term)
+static fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term)
 {
        fInt solution;
 
index 35bc8a29b773c6378f10cf67f3f43f3998c4d604..6c321b0d8a1eb8483c45bf563f79adb000a87865 100644
@@ -810,6 +810,19 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
        return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr;
 }
 
+int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
+                               uint32_t *vol_rep_time, uint32_t *bb_rep_time)
+{
+       const ATOM_PPLIB_POWERPLAYTABLE *powerplay_tab = get_powerplay_table(hwmgr);
+
+       PP_ASSERT_WITH_CODE(NULL != powerplay_tab,
+                           "Missing PowerPlay Table!", return -EINVAL);
+
+       *vol_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usVoltageTime);
+       *bb_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usBackbiasTime);
+
+       return 0;
+}
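
[Annotation: a hypothetical caller of the new accessor (the surrounding names are assumed, not taken from this commit):

    uint32_t vol_rep_time, bb_rep_time;
    int ret;

    ret = pp_tables_get_response_times(hwmgr, &vol_rep_time, &bb_rep_time);
    if (ret)
            return ret;     /* -EINVAL when the PowerPlay table is missing */
]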
 
 int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
                                     unsigned long *num_of_entries)
index 30434802417e5254ebbe75a070103596bd33f03e..baddaa75693bce2c8948844cb9cf75ee4ae7d533 100644 (file)
@@ -32,16 +32,19 @@ struct pp_hw_power_state;
 extern const struct pp_table_func pptable_funcs;
 
 typedef int (*pp_tables_hw_clock_info_callback)(struct pp_hwmgr *hwmgr,
-                                       struct pp_hw_power_state *hw_ps,
-                                                       unsigned int index,
-                                                const void *clock_info);
+                                               struct pp_hw_power_state *hw_ps,
+                                               unsigned int index,
+                                               const void *clock_info);
 
 int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
-                                    unsigned long *num_of_entries);
+                                unsigned long *num_of_entries);
 
 int pp_tables_get_entry(struct pp_hwmgr *hwmgr,
-                                               unsigned long entry_index,
-                                               struct pp_power_state *ps,
-                               pp_tables_hw_clock_info_callback func);
+                       unsigned long entry_index,
+                       struct pp_power_state *ps,
+                       pp_tables_hw_clock_info_callback func);
+
+int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
+                                uint32_t *vol_rep_time, uint32_t *bb_rep_time);
 
 #endif
index 6c4553cf80230f381aeceedfd29774f80205170f..c7dc111221c2f2766611cadf4417cd925e66d2fe 100644
@@ -571,7 +571,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
        if (0 == data->sclk_dpm_key_disabled) {
                /* Checking if DPM is running.  If we discover hang because of this, we should skip this message.*/
                PP_ASSERT_WITH_CODE(
-                               (0 == tonga_is_dpm_running(hwmgr)),
+                               !tonga_is_dpm_running(hwmgr),
                                "Trying to Disable SCLK DPM when DPM is disabled",
                                return -1
                                );
@@ -587,7 +587,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
        if (0 == data->mclk_dpm_key_disabled) {
                /* Checking if DPM is running.  If we discover hang because of this, we should skip this message. */
                PP_ASSERT_WITH_CODE(
-                               (0 == tonga_is_dpm_running(hwmgr)),
+                               !tonga_is_dpm_running(hwmgr),
                                "Trying to Disable MCLK DPM when DPM is disabled",
                                return -1
                                );
@@ -614,7 +614,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
        if (0 == data->pcie_dpm_key_disabled) {
                /* Checking if DPM is running.  If we discover hang because of this, we should skip this message.*/
                PP_ASSERT_WITH_CODE(
-                               (0 == tonga_is_dpm_running(hwmgr)),
+                               !tonga_is_dpm_running(hwmgr),
                                "Trying to Disable PCIE DPM when DPM is disabled",
                                return -1
                                );
@@ -630,7 +630,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
 
        /* Checking if DPM is running.  If we discover hang because of this, we should skip this message.*/
        PP_ASSERT_WITH_CODE(
-                       (0 == tonga_is_dpm_running(hwmgr)),
+                       !tonga_is_dpm_running(hwmgr),
                        "Trying to Disable Voltage CNTL when DPM is disabled",
                        return -1
                        );
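
[Annotation: these hunks rewrite `0 == tonga_is_dpm_running(hwmgr)` as the plainer `!tonga_is_dpm_running(hwmgr)`. PP_ASSERT_WITH_CODE() is powerplay's assert-with-recovery macro: when the condition fails it logs the message and runs the trailing statement. Its real definition lives in the powerplay headers, not this patch; roughly, it has this shape:

    #define PP_ASSERT_WITH_CODE(cond, msg, code)            \
            do {                                            \
                    if (!(cond)) {                          \
                            printk(KERN_ERR "%s\n", msg);   \
                            code;                           \
                    }                                       \
            } while (0)
]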
@@ -688,8 +688,9 @@ int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
        uint32_t level_mask = 1 << n;
 
        /* Checking if DPM is running.  If we discover hang because of this, we should skip this message. */
-       PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
-                       "Trying to force SCLK when DPM is disabled", return -1;);
+       PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+                           "Trying to force SCLK when DPM is disabled",
+                           return -1;);
        if (0 == data->sclk_dpm_key_disabled)
                return (0 == smum_send_msg_to_smc_with_parameter(
                                                             hwmgr->smumgr,
@@ -712,8 +713,9 @@ int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
        uint32_t level_mask = 1 << n;
 
        /* Checking if DPM is running.  If we discover hang because of this, we should skip this message. */
-       PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
-                       "Trying to Force MCLK when DPM is disabled", return -1;);
+       PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+                           "Trying to Force MCLK when DPM is disabled",
+                           return -1;);
        if (0 == data->mclk_dpm_key_disabled)
                return (0 == smum_send_msg_to_smc_with_parameter(
                                                                hwmgr->smumgr,
@@ -735,8 +737,9 @@ int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
        tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
 
        /* Checking if DPM is running.  If we discover hang because of this, we should skip this message.*/
-       PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
-                       "Trying to Force PCIE level when DPM is disabled", return -1;);
+       PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+                           "Trying to Force PCIE level when DPM is disabled",
+                           return -1;);
        if (0 == data->pcie_dpm_key_disabled)
                return (0 == smum_send_msg_to_smc_with_parameter(
                                                             hwmgr->smumgr,
@@ -774,7 +777,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
 
        uint32_t tmp;
        int result;
-       bool error = 0;
+       bool error = false;
 
        result = tonga_read_smc_sram_dword(hwmgr->smumgr,
                                SMU72_FIRMWARE_HEADER_LOCATION +
@@ -933,11 +936,11 @@ int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr)
 {
        tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
 
-       data->uvd_power_gated = 0;
-       data->vce_power_gated = 0;
-       data->samu_power_gated = 0;
-       data->acp_power_gated = 0;
-       data->pg_acp_init = 1;
+       data->uvd_power_gated = false;
+       data->vce_power_gated = false;
+       data->samu_power_gated = false;
+       data->acp_power_gated = false;
+       data->pg_acp_init = true;
 
        return 0;
 }
@@ -955,7 +958,7 @@ int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr)
         * because we may have test scenarios that need us to intentionally disable SCLK/MCLK DPM,
         * whereas voltage control is a fundamental change that will not be disabled
         */
-       return (0 == tonga_is_dpm_running(hwmgr) ? 0 : 1);
+       return (!tonga_is_dpm_running(hwmgr) ? 0 : 1);
 }
 
 /**
@@ -968,7 +971,7 @@ int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr)
 {
        tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
 
-       if (0 != tonga_is_dpm_running(hwmgr)) {
+       if (tonga_is_dpm_running(hwmgr)) {
                /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */
                if (!data->dpm_table_start) {
                        return 1;
@@ -991,7 +994,7 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
 {
        uint32_t table_size, i, j;
        uint16_t vvalue;
-       bool bVoltageFound = 0;
+       bool bVoltageFound = false;
        pp_atomctrl_voltage_table *table;
 
        PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;);
@@ -1007,11 +1010,11 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
 
        for (i = 0; i < voltage_table->count; i++) {
                vvalue = voltage_table->entries[i].value;
-               bVoltageFound = 0;
+               bVoltageFound = false;
 
                for (j = 0; j < table->count; j++) {
                        if (vvalue == table->entries[j].value) {
-                               bVoltageFound = 1;
+                               bVoltageFound = true;
                                break;
                        }
                }
@@ -1302,7 +1305,7 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
                        table->Smio[count] |=
                                data->mvdd_voltage_table.entries[count].smio_low;
                }
-               table->SmioMask2 = data->vddci_voltage_table.mask_low;
+               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
 
                CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
        }
@@ -1331,7 +1334,6 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
 {
        uint32_t count;
        uint8_t index;
-       int result = 0;
        tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
        struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table;
@@ -1378,7 +1380,7 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
                }
        }
 
-       return result;
+       return 0;
 }
 
 
@@ -2042,7 +2044,7 @@ static int tonga_populate_single_memory_level(
 
        if ((data->mclk_stutter_mode_threshold != 0) &&
            (memory_clock <= data->mclk_stutter_mode_threshold) &&
-           (data->is_uvd_enabled == 0)
+           (!data->is_uvd_enabled)
            && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
            && (data->display_timing.num_existing_displays <= 2)
            && (data->display_timing.num_existing_displays != 0))
@@ -2705,7 +2707,7 @@ static int tonga_reset_single_dpm_table(
 
        dpm_table->count = count;
        for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
-               dpm_table->dpm_levels[i].enabled = 0;
+               dpm_table->dpm_levels[i].enabled = false;
        }
 
        return 0;
@@ -2718,7 +2720,7 @@ static void tonga_setup_pcie_table_entry(
 {
        dpm_table->dpm_levels[index].value = pcie_gen;
        dpm_table->dpm_levels[index].param1 = pcie_lanes;
-       dpm_table->dpm_levels[index].enabled = 1;
+       dpm_table->dpm_levels[index].enabled = true;
 }
 
 static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
@@ -2828,7 +2830,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
                                allowed_vdd_sclk_table->entries[i].clk) {
                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
                                allowed_vdd_sclk_table->entries[i].clk;
-                       data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
+                       data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */
                        data->dpm_table.sclk_table.count++;
                }
        }
@@ -2842,7 +2844,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
                        allowed_vdd_mclk_table->entries[i].clk) {
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
                                allowed_vdd_mclk_table->entries[i].clk;
-                       data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
+                       data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 1 : 0; */
                        data->dpm_table.mclk_table.count++;
                }
        }
@@ -3134,7 +3136,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
 
        if (0 == data->sclk_dpm_key_disabled) {
                /* Checking if DPM is running.  If we discover hang because of this, we should skip this message.*/
-               if (0 != tonga_is_dpm_running(hwmgr))
+               if (tonga_is_dpm_running(hwmgr))
                        printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
 
                if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
@@ -3149,7 +3151,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
 
        if (0 == data->mclk_dpm_key_disabled) {
                /* Checking if DPM is running.  If we discover hang because of this, we should skip this message.*/
-               if (0 != tonga_is_dpm_running(hwmgr))
+               if (tonga_is_dpm_running(hwmgr))
                        printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
 
                if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
@@ -3260,7 +3262,7 @@ int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwm
 
        /* initialize vddc_dep_on_dal_pwrl table */
        table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
-       table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
+       table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
 
        if (NULL == table_clk_vlt) {
                printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
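
[Annotation: dropping the cast on kzalloc() here, and repeatedly in the hunks below, is idiomatic C: kzalloc() returns void *, which converts implicitly to any object pointer type, so the cast is noise and can mask a missing prototype. The resulting pattern:

    struct phm_clock_voltage_dependency_table *tbl;

    tbl = kzalloc(table_size, GFP_KERNEL);  /* no cast needed on void * */
    if (!tbl)
            return -ENOMEM;
]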
@@ -3335,9 +3337,9 @@ int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
        tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
        int result = 1;
 
-       PP_ASSERT_WITH_CODE (0 == tonga_is_dpm_running(hwmgr),
-               "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
-                                                       return result);
+       PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr),
+                            "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
+                            return result);
 
        if (0 == data->pcie_dpm_key_disabled) {
                PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
@@ -3741,7 +3743,7 @@ uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
 
 bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
 {
-       bool result = 1;
+       bool result = true;
 
        switch (inReg) {
        case  mmMC_SEQ_RAS_TIMING:
@@ -3825,7 +3827,7 @@ bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
                break;
 
        default:
-               result = 0;
+               result = false;
                break;
        }
 
@@ -4449,7 +4451,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 
        hwmgr->backend = data;
 
-       data->dll_defaule_on = 0;
+       data->dll_defaule_on = false;
        data->sram_end = SMC_RAM_END;
 
        data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT;
@@ -4555,13 +4557,13 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 
        /* ULV Support*/
        ulv = &(data->ulv);
-       ulv->ulv_supported = 0;
+       ulv->ulv_supported = false;
 
        /* Initialize Dynamic State Adjustment Rule Settings */
        result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
        if (result)
                printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n");
-       data->uvd_enabled = 0;
+       data->uvd_enabled = false;
 
        table = &(data->smc_state_table);
 
@@ -4608,7 +4610,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                PHM_PlatformCaps_SMU7);
 
-       data->vddc_phase_shed_control = 0;
+       data->vddc_phase_shed_control = false;
 
        phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
                      PHM_PlatformCaps_UVDPowerGating);
@@ -4627,7 +4629,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        }
 
        if (0 == result) {
-               data->is_tlu_enabled = 0;
+               data->is_tlu_enabled = false;
                hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
                        TONGA_MAX_HARDWARE_POWERLEVELS;
                hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
@@ -5308,9 +5310,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
        if ((0 == data->sclk_dpm_key_disabled) &&
                (data->need_update_smu7_dpm_table &
                (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-               PP_ASSERT_WITH_CODE(
-                       0 == tonga_is_dpm_running(hwmgr),
-                       "Trying to freeze SCLK DPM when DPM is disabled",
+               PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+                                   "Trying to freeze SCLK DPM when DPM is disabled",
                        );
                PP_ASSERT_WITH_CODE(
                        0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -5322,8 +5323,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
        if ((0 == data->mclk_dpm_key_disabled) &&
                (data->need_update_smu7_dpm_table &
                 DPMTABLE_OD_UPDATE_MCLK)) {
-               PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
-                       "Trying to freeze MCLK DPM when DPM is disabled",
+               PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+                                   "Trying to freeze MCLK DPM when DPM is disabled",
                        );
                PP_ASSERT_WITH_CODE(
                        0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -5458,7 +5459,6 @@ static    int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
 
 static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state)
 {
-       int result = 0;
        struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
        uint32_t high_limit_count;
 
@@ -5478,7 +5478,7 @@ static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_powe
                                                hw_state->performance_levels[0].memory_clock,
                                                hw_state->performance_levels[high_limit_count].memory_clock);
 
-       return result;
+       return 0;
 }
 
 static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
@@ -5625,8 +5625,8 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                (data->need_update_smu7_dpm_table &
                (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
 
-               PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
-                       "Trying to Unfreeze SCLK DPM when DPM is disabled",
+               PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+                                   "Trying to Unfreeze SCLK DPM when DPM is disabled",
                        );
                PP_ASSERT_WITH_CODE(
                         0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -5638,9 +5638,8 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
        if ((0 == data->mclk_dpm_key_disabled) &&
                (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
 
-               PP_ASSERT_WITH_CODE(
-                               0 == tonga_is_dpm_running(hwmgr),
-                               "Trying to Unfreeze MCLK DPM when DPM is disabled",
+               PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+                                   "Trying to Unfreeze MCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(
                         0 == smum_send_msg_to_smc(hwmgr->smumgr,
index 94d6b472e1fe40bb0ca5718704c03cbfec93458d..cfb647f76cbe91df4c355411b2254dc54efb9848 100644
@@ -167,8 +167,7 @@ static int get_vddc_lookup_table(
        table_size = sizeof(uint32_t) +
                sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels;
 
-       table = (phm_ppt_v1_voltage_lookup_table *)
-               kzalloc(table_size, GFP_KERNEL);
+       table = kzalloc(table_size, GFP_KERNEL);
 
        if (NULL == table)
                return -ENOMEM;
@@ -302,7 +301,7 @@ static int init_dpm_2_parameters(
                        (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
 
                if (0 != powerplay_table->usPPMTableOffset) {
-                       if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
+                       if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
                                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                        PHM_PlatformCaps_EnablePlatformPowerManagement);
                        }
@@ -327,7 +326,7 @@ static int get_valid_clk(
        table_size = sizeof(uint32_t) +
                sizeof(uint32_t) * clk_volt_pp_table->count;
 
-       table = (struct phm_clock_array *)kzalloc(table_size, GFP_KERNEL);
+       table = kzalloc(table_size, GFP_KERNEL);
 
        if (NULL == table)
                return -ENOMEM;
@@ -377,8 +376,7 @@ static int get_mclk_voltage_dependency_table(
        table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
                * mclk_dep_table->ucNumEntries;
 
-       mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
-               kzalloc(table_size, GFP_KERNEL);
+       mclk_table = kzalloc(table_size, GFP_KERNEL);
 
        if (NULL == mclk_table)
                return -ENOMEM;
@@ -424,8 +422,7 @@ static int get_sclk_voltage_dependency_table(
                table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
                        * tonga_table->ucNumEntries;
 
-               sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
-                       kzalloc(table_size, GFP_KERNEL);
+               sclk_table = kzalloc(table_size, GFP_KERNEL);
 
                if (NULL == sclk_table)
                        return -ENOMEM;
@@ -456,8 +453,7 @@ static int get_sclk_voltage_dependency_table(
                table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
                        * polaris_table->ucNumEntries;
 
-               sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
-                       kzalloc(table_size, GFP_KERNEL);
+               sclk_table = kzalloc(table_size, GFP_KERNEL);
 
                if (NULL == sclk_table)
                        return -ENOMEM;
@@ -504,7 +500,7 @@ static int get_pcie_table(
                table_size = sizeof(uint32_t) +
                        sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
 
-               pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
+               pcie_table = kzalloc(table_size, GFP_KERNEL);
 
                if (pcie_table == NULL)
                        return -ENOMEM;
@@ -541,7 +537,7 @@ static int get_pcie_table(
                table_size = sizeof(uint32_t) +
                        sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
 
-               pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
+               pcie_table = kzalloc(table_size, GFP_KERNEL);
 
                if (pcie_table == NULL)
                        return -ENOMEM;
@@ -695,8 +691,7 @@ static int get_mm_clock_voltage_table(
        table_size = sizeof(uint32_t) +
                sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record)
                * mm_dependency_table->ucNumEntries;
-       mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *)
-               kzalloc(table_size, GFP_KERNEL);
+       mm_table = kzalloc(table_size, GFP_KERNEL);
 
        if (NULL == mm_table)
                return -ENOMEM;
@@ -1073,7 +1068,6 @@ int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
 
 int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
 {
-       int result = 0;
        struct phm_ppt_v1_information *pp_table_information =
                (struct phm_ppt_v1_information *)(hwmgr->pptable);
 
@@ -1113,7 +1107,7 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
        kfree(hwmgr->pptable);
        hwmgr->pptable = NULL;
 
-       return result;
+       return 0;
 }
 
 const struct pp_table_func tonga_pptable_funcs = {
index 0bbc42a224e5fea004933abd511b875bdf79c6db..bf0d2accf7bff5d23e8e89895c468481b03fdb46 100644
@@ -415,6 +415,8 @@ struct phm_cac_tdp_table {
        uint8_t  ucVr_I2C_Line;
        uint8_t  ucPlx_I2C_address;
        uint8_t  ucPlx_I2C_Line;
+       uint32_t usBoostPowerLimit;
+       uint8_t  ucCKS_LDO_REFSEL;
 };
 
 struct phm_ppm_table {
index d41d37ab5b7c59f586f32c3133240230de93d597..b8f4b73c322e1b0a501cef8a774b0c4265f15cb6 100644
@@ -392,6 +392,8 @@ typedef uint16_t PPSMC_Result;
 #define PPSMC_MSG_SetGpuPllDfsForSclk         ((uint16_t) 0x300)
 #define PPSMC_MSG_Didt_Block_Function            ((uint16_t) 0x301)
 
+#define PPSMC_MSG_SetVBITimeout               ((uint16_t) 0x306)
+
 #define PPSMC_MSG_SecureSRBMWrite             ((uint16_t) 0x600)
 #define PPSMC_MSG_SecureSRBMRead              ((uint16_t) 0x601)
 #define PPSMC_MSG_SetAddress                  ((uint16_t) 0x800)
index b85ff5400e57eb2ca78a2e1500bfbf8369f57cf6..899d6d8108c210fb6b5548f3f965e29a02b80f8b 100644
@@ -270,7 +270,8 @@ struct SMU74_Discrete_DpmTable {
        uint8_t                             BootPhases;
 
        uint8_t                             VRHotLevel;
-       uint8_t                             Reserved1[3];
+       uint8_t                             LdoRefSel;
+       uint8_t                             Reserved1[2];
        uint16_t                            FanStartTemperature;
        uint16_t                            FanStopTemperature;
        uint16_t                            MaxVoltage;
index fc9e3d1dd4098fe4115c19099c6e98e52dea1c1d..3c235f0177cd9e0667b9d0deab7860efec7ac061 100644
@@ -131,6 +131,12 @@ extern int smu_free_memory(void *device, void *handle);
        smum_wait_on_indirect_register(smumgr,                          \
                                mm##port##_INDEX, index, value, mask)
 
+#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask)    \
+           SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+
+#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval)                          \
+           SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
+                                   SMUM_FIELD_MASK(reg, field) )
 
 #define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr,         \
                                                        index, value, mask) \
@@ -158,6 +164,10 @@ extern int smu_free_memory(void *device, void *handle);
                (SMUM_FIELD_MASK(reg, field) & ((field_val) <<                 \
                        SMUM_FIELD_SHIFT(reg, field))))
 
+#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \
+           SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+                          reg, field)
+
 #define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr,           \
                                port, index, value, mask)               \
        smum_wait_on_indirect_register(smumgr,                          \
@@ -191,6 +201,13 @@ extern int smu_free_memory(void *device, void *handle);
                        SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
                        reg, field, fieldval))
 
+
+#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval)                  \
+               cgs_write_ind_register(device, port, ix##reg,                           \
+                       SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg),    \
+                                      reg, field, fieldval))
+
+
 #define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
        SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg,             \
                (fieldval) << SMUM_FIELD_SHIFT(reg, field),             \
@@ -200,4 +217,16 @@ extern int smu_free_memory(void *device, void *handle);
        SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg,     \
                (fieldval) << SMUM_FIELD_SHIFT(reg, field),             \
                SMUM_FIELD_MASK(reg, field))
+
+#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask)    \
+       smum_wait_for_indirect_register_unequal(smumgr,                 \
+               mm##port##_INDEX, index, value, mask)
+
+#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask)    \
+           SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+
+#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval)                          \
+           SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
+                                   SMUM_FIELD_MASK(reg, field) )
+
 #endif
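
[Annotation: the new wait/read/write helpers lean on ## token pasting: ix##reg produces the register's indirect-space offset constant, and mm##port##_INDEX the port's index register. Illustrative expansion with a hypothetical SMC_IND port and FOO_STATUS register:

    SMUM_WAIT_INDIRECT_REGISTER(smumgr, SMC_IND, FOO_STATUS, value, mask)
      /* expands to */
    SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, SMC_IND, ixFOO_STATUS, value, mask)
      /* which resolves to */
    smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX, ixFOO_STATUS, value, mask);
]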
index b22722eabafcbc53477dd284a2671326443ec011..f42c536b3af10a58bb12b95fd6ecdd52a9e0b0b0 100644
@@ -479,7 +479,6 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
        struct tonga_smumgr *tonga_smu =
                (struct tonga_smumgr *)(smumgr->backend);
        uint16_t fw_to_load;
-       int result = 0;
        struct SMU_DRAMData_TOC *toc;
        /**
         * First time this gets called during SmuMgr init,
@@ -563,7 +562,7 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
                smumgr, PPSMC_MSG_LoadUcodes, fw_to_load),
                "Fail to Request SMU Load uCode", return 0);
 
-       return result;
+       return 0;
 }
 
 static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
index 70ff09d10885f3d19828233f6bddcc1e227add5c..ef312bb75fda094b1f51bac52ce391d459341ca0 100644
@@ -393,12 +393,13 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
                        s_job->s_fence->parent = NULL;
                }
        }
+       atomic_set(&sched->hw_rq_count, 0);
        spin_unlock(&sched->job_list_lock);
 }
 
 void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 {
-       struct amd_sched_job *s_job;
+       struct amd_sched_job *s_job, *tmp;
        int r;
 
        spin_lock(&sched->job_list_lock);
@@ -407,9 +408,13 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
        if (s_job)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
 
-       list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+       list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct amd_sched_fence *s_fence = s_job->s_fence;
-               struct fence *fence = sched->ops->run_job(s_job);
+               struct fence *fence;
+
+               spin_unlock(&sched->job_list_lock);
+               fence = sched->ops->run_job(s_job);
+               atomic_inc(&sched->hw_rq_count);
                if (fence) {
                        s_fence->parent = fence_get(fence);
                        r = fence_add_callback(fence, &s_fence->cb,
@@ -424,6 +429,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }
+               spin_lock(&sched->job_list_lock);
        }
        spin_unlock(&sched->job_list_lock);
 }
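
[Annotation: two coordinated changes in amd_sched_job_recovery(): the walk becomes list_for_each_entry_safe(), and job_list_lock is released around ops->run_job(), which may sleep or take other locks and so must not be called under a spinlock; the _safe iterator caches the next node, so the list may change while the lock is dropped. The pattern, stripped to its shape (names hypothetical):

    spin_lock(&lock);
    list_for_each_entry_safe(job, tmp, &jobs, node) {
            spin_unlock(&lock);     /* callback may sleep: drop the spinlock */
            run_job(job);           /* hypothetical callback                 */
            spin_lock(&lock);       /* re-take before advancing the iterator */
    }
    spin_unlock(&lock);
]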
index f9a13b658fea667e9f5592fe6f619f3d9929ecc3..f47d88ba4fa53c24979adbcaa9ba2172dd2b77c4 100644
@@ -2,7 +2,6 @@ config DRM_ARCPGU
        tristate "ARC PGU"
        depends on DRM && OF
        select DRM_KMS_CMA_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_KMS_HELPER
        help
          Choose this option if you have an ARC PGU controller.
index ccbdadb108dcdf531760c8848519cdeb61912dd7..6d4ff34737cb73746ce37dd7b4c00293d74c1283 100644
@@ -28,8 +28,7 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
 {
        struct arcpgu_drm_private *arcpgu = dev->dev_private;
 
-       if (arcpgu->fbdev)
-               drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
+       drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
 }
 
 static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
@@ -49,7 +48,7 @@ static void arcpgu_setup_mode_config(struct drm_device *drm)
        drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
 }
 
-int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        int ret;
 
@@ -104,10 +103,8 @@ static int arcpgu_load(struct drm_device *drm)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(arcpgu->regs)) {
-               dev_err(drm->dev, "Could not remap IO mem\n");
+       if (IS_ERR(arcpgu->regs))
                return PTR_ERR(arcpgu->regs);
-       }
 
        dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
                 arc_pgu_read(arcpgu, ARCPGU_REG_ID));
@@ -127,10 +124,11 @@ static int arcpgu_load(struct drm_device *drm)
        encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
        if (encoder_node) {
                ret = arcpgu_drm_hdmi_init(drm, encoder_node);
+               of_node_put(encoder_node);
                if (ret < 0)
                        return ret;
        } else {
-               ret = arcpgu_drm_sim_init(drm, 0);
+               ret = arcpgu_drm_sim_init(drm, NULL);
                if (ret < 0)
                        return ret;
        }
@@ -151,7 +149,7 @@ static int arcpgu_load(struct drm_device *drm)
        return 0;
 }
 
-int arcpgu_unload(struct drm_device *drm)
+static int arcpgu_unload(struct drm_device *drm)
 {
        struct arcpgu_drm_private *arcpgu = drm->dev_private;
 
index 1b2906568a48ef3a09250b702b80b116eac162c6..9a18e1bd57b427f7362366ffe9af64d02b21d63c 100644 (file)
@@ -9,7 +9,6 @@ config DRM_HDLCD
        depends on COMMON_CLK
        select DRM_ARM
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_KMS_CMA_HELPER
        help
          Choose this option if you have an ARM High Definition Colour LCD
index 74279be20b759c9065a21198e8fcba0f4dc63f92..d83b46a3032765c85fc8b00534ac550720002c8f 100644 (file)
@@ -102,8 +102,7 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
 {
        struct hdlcd_drm_private *hdlcd = drm->dev_private;
 
-       if (hdlcd->fbdev)
-               drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
+       drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
 }
 
 static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
index e5b44e92f8cf2355bcce1b6e56ad9a98e9f7ad7b..82171d223f2d4f469ebfeac54aeba04cab4b015a 100644 (file)
@@ -257,6 +257,7 @@ static int malidp_bind(struct device *dev)
 {
        struct resource *res;
        struct drm_device *drm;
+       struct device_node *ep;
        struct malidp_drm *malidp;
        struct malidp_hw_device *hwdev;
        struct platform_device *pdev = to_platform_device(dev);
@@ -284,10 +285,8 @@ static int malidp_bind(struct device *dev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        hwdev->regs = devm_ioremap_resource(dev, res);
-       if (IS_ERR(hwdev->regs)) {
-               DRM_ERROR("Failed to map control registers area\n");
+       if (IS_ERR(hwdev->regs))
                return PTR_ERR(hwdev->regs);
-       }
 
        hwdev->pclk = devm_clk_get(dev, "pclk");
        if (IS_ERR(hwdev->pclk))
@@ -360,11 +359,14 @@ static int malidp_bind(struct device *dev)
                goto register_fail;
 
        /* Set the CRTC's port so that the encoder component can find it */
-       malidp->crtc.port = of_graph_get_next_endpoint(dev->of_node, NULL);
+       ep = of_graph_get_next_endpoint(dev->of_node, NULL);
+       if (!ep) {
+               ret = -EINVAL;
+               goto port_fail;
+       }
+       malidp->crtc.port = of_get_next_parent(ep);
 
        ret = component_bind_all(dev, drm);
-       of_node_put(malidp->crtc.port);
-
        if (ret) {
                DRM_ERROR("Failed to bind all components\n");
                goto bind_fail;
@@ -402,6 +404,9 @@ vblank_fail:
 irq_init_fail:
        component_unbind_all(dev, drm);
 bind_fail:
+       of_node_put(malidp->crtc.port);
+       malidp->crtc.port = NULL;
+port_fail:
        drm_dev_unregister(drm);
 register_fail:
        malidp_de_planes_destroy(drm);
@@ -435,6 +440,8 @@ static void malidp_unbind(struct device *dev)
        malidp_de_irq_fini(drm);
        drm_vblank_cleanup(drm);
        component_unbind_all(dev, drm);
+       of_node_put(malidp->crtc.port);
+       malidp->crtc.port = NULL;
        drm_dev_unregister(drm);
        malidp_de_planes_destroy(drm);
        drm_mode_config_cleanup(drm);
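The malidp error paths now unwind in strict reverse order of acquisition, with the new port_fail label releasing the endpoint reference taken just above it. A compact sketch of the goto-unwind idiom, with placeholder acquire/release helpers (on success the resources intentionally stay bound, as in a driver's ->bind):

    #include <stdlib.h>

    struct res { int dummy; };

    static struct res *acquire(void) { return malloc(sizeof(struct res)); }
    static void release(struct res *r) { free(r); }
    static int step(void) { return 0; }

    int bind(void)
    {
        struct res *a, *b;
        int ret;

        a = acquire();
        if (!a)
            return -1;

        b = acquire();
        if (!b) {
            ret = -1;
            goto err_a;
        }

        ret = step();
        if (ret)
            goto err_b;         /* unwind in reverse order of acquisition */

        return 0;               /* on success, a and b stay bound */

    err_b:
        release(b);
    err_a:
        release(a);
        return ret;
    }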
index eb773e9af313a6715f21dcc1ede2cf98eee07afe..15f3ecfb16f1f794caca89e282811b2083e84e80 100644 (file)
@@ -1,11 +1,7 @@
 config DRM_ARMADA
        tristate "DRM support for Marvell Armada SoCs"
        depends on DRM && HAVE_CLK && ARM
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        help
          Support the "LCD" controllers found on the Marvell Armada 510
          devices.  There are two controllers on the device, each controller
index 8a784c460c89d8963effbb25b4509878d2003bc5..15f6ce7acb2a21400b0d39765b90241a839f525a 100644 (file)
@@ -2,11 +2,7 @@ config DRM_AST
        tristate "AST server chips"
        depends on DRM && PCI
        select DRM_TTM
-       select FB_SYS_COPYAREA
-       select FB_SYS_FILLRECT
-       select FB_SYS_IMAGEBLIT
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_TTM
        help
         Say yes for experimental AST GPU driver. Do not enable
index 7bc3aa6dda8cff86f78ec0400bac76d852ebec95..904beaa932d03f0b5ce7d89a13d16729582052e0 100644 (file)
@@ -295,9 +295,8 @@ static int ast_get_dram_info(struct drm_device *dev)
 static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
-       if (ast_fb->obj)
-               drm_gem_object_unreference_unlocked(ast_fb->obj);
 
+       drm_gem_object_unreference_unlocked(ast_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(fb);
 }
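Several hunks in this series, including the one above, drop if (ptr) guards in front of drm_gem_object_unreference_unlocked(), backlight_device_unregister(), vunmap(), psb_intel_i2c_destroy() and pci_dev_put(): like kfree(), these helpers are (or were made) NULL-tolerant, so the guard is dead weight. The contract in miniature (illustrative userspace C):

    #include <stdlib.h>

    struct buf { char *data; };

    /* NULL-tolerant release in the style of kfree(NULL): callers never
     * need an "if (b)" guard first. */
    static void buf_put(struct buf *b)
    {
        if (!b)
            return;
        free(b->data);
        free(b);
    }

    int main(void)
    {
        buf_put(NULL);      /* perfectly fine: the helper absorbs NULL */
        return 0;
    }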
index 99b4f0698a3035c08b3d2836a34aa923cbbc6bf8..32bcc4bad06ac4cb18134b511e2b31b62c7b1ef0 100644 (file)
@@ -3,7 +3,6 @@ config DRM_ATMEL_HLCDC
        depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_PANEL
        help
index 5f8b0c2b9a4461d2e07b1d7a3abb25f69837fc90..f739763f47ce8cedd91205002e6f3fca02126739 100644 (file)
@@ -2,10 +2,6 @@ config DRM_BOCHS
        tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
        depends on DRM && PCI
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
        select DRM_TTM
        help
          Choose this option for qemu.
index b109fdcaa679192d1ab6fdd8149091d7f68e1e1f..5c5638a777a12ad12c1e0fdc8d9627d7d78ea50b 100644 (file)
@@ -465,8 +465,8 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
 static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
-       if (bochs_fb->obj)
-               drm_gem_object_unreference_unlocked(bochs_fb->obj);
+
+       drm_gem_object_unreference_unlocked(bochs_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(fb);
 }
index 5cd8dd7e5904e701ade77f2f652238f0bcdfbfd1..583b8ce614e3ba68b5d4cc93c580da92d4bd6226 100644 (file)
@@ -636,9 +636,7 @@ static int ps8622_remove(struct i2c_client *client)
 {
        struct ps8622_bridge *ps8622 = i2c_get_clientdata(client);
 
-       if (ps8622->bl)
-               backlight_device_unregister(ps8622->bl);
-
+       backlight_device_unregister(ps8622->bl);
        drm_bridge_remove(&ps8622->bridge);
 
        return 0;
index 9864559e5fb994ec47e9b5ceb13246c9682491fe..04b3c161dfae6fc9c643229f2a312eb44abca62a 100644 (file)
@@ -1,11 +1,7 @@
 config DRM_CIRRUS_QEMU
        tristate "Cirrus driver for QEMU emulated device"
        depends on DRM && PCI
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_TTM
        help
         This is a KMS driver for emulated cirrus device in qemu.
index 32d32c5b7b17448bdb252301d5cea65f3e3cacbf..80446e2d3ab6efd00f3d3e777e4178b26ee6ae5d 100644 (file)
@@ -17,8 +17,8 @@
 static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-       if (cirrus_fb->obj)
-               drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+
+       drm_gem_object_unreference_unlocked(cirrus_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(fb);
 }
index 9359be4a0ca971d6d3cb95b2e276eb5433039532..8d2f111fa113363e83218f641f677435b7da832b 100644 (file)
@@ -404,8 +404,7 @@ drm_atomic_replace_property_blob(struct drm_property_blob **blob,
        if (old_blob == new_blob)
                return;
 
-       if (old_blob)
-               drm_property_unreference_blob(old_blob);
+       drm_property_unreference_blob(old_blob);
        if (new_blob)
                drm_property_reference_blob(new_blob);
        *blob = new_blob;
@@ -1589,72 +1588,6 @@ void drm_atomic_clean_old_fb(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_atomic_clean_old_fb);
 
-int drm_atomic_remove_fb(struct drm_framebuffer *fb)
-{
-       struct drm_modeset_acquire_ctx ctx;
-       struct drm_device *dev = fb->dev;
-       struct drm_atomic_state *state;
-       struct drm_plane *plane;
-       int ret = 0;
-       unsigned plane_mask;
-
-       state = drm_atomic_state_alloc(dev);
-       if (!state)
-               return -ENOMEM;
-
-       drm_modeset_acquire_init(&ctx, 0);
-       state->acquire_ctx = &ctx;
-
-retry:
-       plane_mask = 0;
-       ret = drm_modeset_lock_all_ctx(dev, &ctx);
-       if (ret)
-               goto unlock;
-
-       drm_for_each_plane(plane, dev) {
-               struct drm_plane_state *plane_state;
-
-               if (plane->state->fb != fb)
-                       continue;
-
-               plane_state = drm_atomic_get_plane_state(state, plane);
-               if (IS_ERR(plane_state)) {
-                       ret = PTR_ERR(plane_state);
-                       goto unlock;
-               }
-
-               drm_atomic_set_fb_for_plane(plane_state, NULL);
-               ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
-               if (ret)
-                       goto unlock;
-
-               plane_mask |= BIT(drm_plane_index(plane));
-
-               plane->old_fb = plane->fb;
-               plane->fb = NULL;
-       }
-
-       if (plane_mask)
-               ret = drm_atomic_commit(state);
-
-unlock:
-       if (plane_mask)
-               drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
-       if (ret == -EDEADLK) {
-               drm_modeset_backoff(&ctx);
-               goto retry;
-       }
-
-       if (ret || !plane_mask)
-               drm_atomic_state_free(state);
-
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-
-       return ret;
-}
-
 int drm_mode_atomic_ioctl(struct drm_device *dev,
                          void *data, struct drm_file *file_priv)
 {
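The function removed above (drm_atomic_remove_fb(), reverted here) is a textbook instance of the atomic locking dance: take every lock through an acquire context and, on -EDEADLK, back off and restart from scratch. The control flow isolated into a self-contained sketch, with the ww-mutex machinery reduced to stubs:

    #include <errno.h>
    #include <stdio.h>

    static int attempts;

    static int lock_all(void)    { return ++attempts < 3 ? -EDEADLK : 0; }
    static void backoff(void)    { printf("deadlock detected, dropping locks\n"); }
    static void unlock_all(void) { }

    int do_modeset(void)
    {
        int ret;

    retry:
        ret = lock_all();
        if (ret == -EDEADLK) {
            backoff();          /* drop everything, then start over */
            goto retry;
        }
        if (ret)
            return ret;

        /* ... commit state while all locks are held ... */
        unlock_all();
        return 0;
    }

    int main(void) { return do_modeset(); }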
index 9d3f80efc9cc71aa747153f35c25d446be2c11e1..f1d9f0569d7f86514773e517bafba9326de82a45 100644 (file)
@@ -613,11 +613,6 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
         * in this manner.
         */
        if (drm_framebuffer_read_refcount(fb) > 1) {
-               if (dev->mode_config.funcs->atomic_commit) {
-                       drm_atomic_remove_fb(fb);
-                       goto out;
-               }
-
                drm_modeset_lock_all(dev);
                /* remove from any CRTC */
                drm_for_each_crtc(crtc, dev) {
@@ -635,7 +630,6 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
                drm_modeset_unlock_all(dev);
        }
 
-out:
        drm_framebuffer_unreference(fb);
 }
 EXPORT_SYMBOL(drm_framebuffer_remove);
@@ -934,11 +928,11 @@ int drm_connector_init(struct drm_device *dev,
        connector->dev = dev;
        connector->funcs = funcs;
 
-       connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
-       if (connector->connector_id < 0) {
-               ret = connector->connector_id;
+       ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
+       if (ret < 0)
                goto out_put;
-       }
+       connector->index = ret;
+       ret = 0;
 
        connector->connector_type = connector_type;
        connector->connector_type_id =
@@ -986,7 +980,7 @@ out_put_type_id:
                ida_remove(connector_ida, connector->connector_type_id);
 out_put_id:
        if (ret)
-               ida_remove(&config->connector_ida, connector->connector_id);
+               ida_remove(&config->connector_ida, connector->index);
 out_put:
        if (ret)
                drm_mode_object_unregister(dev, &connector->base);
@@ -1030,7 +1024,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
                   connector->connector_type_id);
 
        ida_remove(&dev->mode_config.connector_ida,
-                  connector->connector_id);
+                  connector->index);
 
        kfree(connector->display_info.bus_formats);
        drm_mode_object_unregister(dev, &connector->base);
@@ -1113,6 +1107,15 @@ void drm_connector_unregister(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_connector_unregister);
 
+static void drm_connector_unregister_all(struct drm_device *dev)
+{
+       struct drm_connector *connector;
+
+       /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               drm_connector_unregister(connector);
+}
+
 static int drm_connector_register_all(struct drm_device *dev)
 {
        struct drm_connector *connector;
@@ -1136,26 +1139,6 @@ err:
        return ret;
 }
 
-/**
- * drm_connector_unregister_all - unregister connector userspace interfaces
- * @dev: drm device
- *
- * This functions unregisters all connectors from sysfs and other places so
- * that userspace can no longer access them. Drivers should call this as the
- * first step of tearing down the device instance, or when the underlying
- * physical device disappeared (e.g. USB unplug), right before calling
- * drm_dev_unregister().
- */
-void drm_connector_unregister_all(struct drm_device *dev)
-{
-       struct drm_connector *connector;
-
-       /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-               drm_connector_unregister(connector);
-}
-EXPORT_SYMBOL(drm_connector_unregister_all);
-
 static int drm_encoder_register_all(struct drm_device *dev)
 {
        struct drm_encoder *encoder;
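The connector-id fix above checks the signed return of ida_simple_get() before storing it, rather than round-tripping it through the struct field where a negative errno can be mangled. The shape of that pattern, with a toy allocator (name and limits are illustrative):

    #include <errno.h>

    struct connector { unsigned int index; };

    /* Toy allocator in the style of ida_simple_get(): returns a fresh
     * id >= 0, or a negative errno on failure. */
    static int ida_alloc_sketch(void)
    {
        static int next_id;

        return next_id < 4 ? next_id++ : -ENOMEM;
    }

    int connector_init(struct connector *c)
    {
        int ret = ida_alloc_sketch();

        if (ret < 0)
            return ret;     /* test the signed return first... */
        c->index = ret;     /* ...then store into the unsigned field */
        return 0;
    }

    int main(void)
    {
        struct connector c;

        return connector_init(&c) ? 1 : 0;
    }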
index b248e2238a057713813e4b2de236d95596cce7fe..47a500b90fd74370c14b719d74035699556dd578 100644 (file)
@@ -125,7 +125,6 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
                            struct drm_property *property, uint64_t *val);
 int drm_mode_atomic_ioctl(struct drm_device *dev,
                          void *data, struct drm_file *file_priv);
-int drm_atomic_remove_fb(struct drm_framebuffer *fb);
 
 int drm_modeset_register_all(struct drm_device *dev);
 void drm_modeset_unregister_all(struct drm_device *dev);
index 091053e995e5b28d587efc14fca2fa21a7c746c4..8f11b8741e425720b9ad25c622da7fa5b9bfbf48 100644 (file)
@@ -203,7 +203,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
 
                ret = aux->transfer(aux, &msg);
 
-               if (ret > 0) {
+               if (ret >= 0) {
                        native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
                        if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
                                if (ret == size)
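Relaxing ret > 0 to ret >= 0 means a zero-length AUX reply is still routed through the native ACK/NACK/DEFER handling rather than being treated as a failed transfer; that reading is inferred from the surrounding code, so take the sketch below as illustrative only (hypothetical names, userspace C):

    #include <errno.h>
    #include <stddef.h>

    enum reply { REPLY_ACK, REPLY_NACK, REPLY_DEFER };
    struct msg { enum reply reply; };

    /* Stand-in for aux->transfer(): returns bytes moved (possibly 0 for
     * a status-only reply) or a negative errno. */
    static int transfer(struct msg *m)
    {
        m->reply = REPLY_DEFER;
        return 0;
    }

    static int dpcd_access(size_t size)
    {
        struct msg m;
        int ret = transfer(&m);

        if (ret >= 0) {             /* a 0-byte reply still carries status */
            if (m.reply == REPLY_ACK && (size_t)ret == size)
                return 0;
            if (m.reply == REPLY_DEFER)
                return -EAGAIN;     /* sink asked us to retry */
        }
        return ret < 0 ? ret : -EPROTO;
    }

    int main(void)
    {
        return dpcd_access(4) == -EAGAIN ? 0 : 1;
    }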
index 6537908050d71b8e3efe2d5e6c60b5417957f0bf..04e457117980b8d554e3561139f52cdc53dedc0d 100644 (file)
@@ -1493,11 +1493,8 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
        WARN_ON(!mutex_is_locked(&mgr->qlock));
 
        /* construct a chunk from the first msg in the tx_msg queue */
-       if (list_empty(&mgr->tx_msg_downq)) {
-               mgr->tx_down_in_progress = false;
+       if (list_empty(&mgr->tx_msg_downq))
                return;
-       }
-       mgr->tx_down_in_progress = true;
 
        txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
        ret = process_single_tx_qlock(mgr, txmsg, false);
@@ -1512,10 +1509,6 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
                txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
                wake_up(&mgr->tx_waitq);
        }
-       if (list_empty(&mgr->tx_msg_downq)) {
-               mgr->tx_down_in_progress = false;
-               return;
-       }
 }
 
 /* called holding qlock */
@@ -1538,7 +1531,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
 {
        mutex_lock(&mgr->qlock);
        list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
-       if (!mgr->tx_down_in_progress)
+       if (list_is_singular(&mgr->tx_msg_downq))
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
@@ -2372,6 +2365,7 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
 
 /**
  * drm_dp_mst_detect_port() - get connection status for an MST port
+ * @connector: DRM connector for this port
  * @mgr: manager for this port
  * @port: unverified pointer to a port
  *
@@ -2887,7 +2881,7 @@ static void drm_dp_tx_work(struct work_struct *work)
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
        mutex_lock(&mgr->qlock);
-       if (mgr->tx_down_in_progress)
+       if (!list_empty(&mgr->tx_msg_downq))
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
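Dropping tx_down_in_progress removes a bool that merely mirrored the tx_msg_downq list and could drift out of sync: transmission now starts on the empty-to-singular edge (list_is_singular()) and the worker drains while the list is non-empty. The same derivation on a toy singly linked queue:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next; };
    struct queue { struct node *head; };

    static bool queue_empty(const struct queue *q)    { return q->head == NULL; }
    static bool queue_singular(const struct queue *q) { return q->head && !q->head->next; }

    /* Stand-in for process_single_down_tx_qlock(): starts transmitting
     * the head-of-queue message. */
    static void kick_tx(struct queue *q) { (void)q; puts("kick: start draining"); }

    /* The queue itself is the "in progress" state: a kick is needed
     * only on the empty->singular edge, exactly what the patch derives
     * with list_is_singular(). */
    static void queue_tx(struct queue *q, struct node *n)
    {
        struct node **pp = &q->head;

        n->next = NULL;
        while (*pp)
            pp = &(*pp)->next;
        *pp = n;                    /* list_add_tail() */

        if (queue_singular(q))
            kick_tx(q);
    }

    /* The tx worker (cf. drm_dp_tx_work): drain whenever the list is
     * non-empty, with no separate bool flag to keep in sync. */
    static void tx_work(struct queue *q)
    {
        while (!queue_empty(q)) {
            q->head = q->head->next;
            puts("work: sent one message");
        }
    }

    int main(void)
    {
        struct queue q = { NULL };
        struct node a, b;

        queue_tx(&q, &a);   /* empty -> singular: kicked */
        queue_tx(&q, &b);   /* already pending: no second kick */
        tx_work(&q);
        return 0;
    }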
index 35c86acede38339f71b16741e3a028b7451fa167..77f357b2c3869a8bb13eea58c8c76819df34e70a 100644 (file)
@@ -1695,7 +1695,6 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 
        DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
                  vblwait->request.sequence, pipe);
-       vblank->last_wait = vblwait->request.sequence;
        DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
                    (((drm_vblank_count(dev, pipe) -
                       vblwait->request.sequence) <= (1 << 23)) ||
index df9bcbab922f359bd1837f5b3a5fde457e827457..56fb8637bb57ece37a880d7f8bb04231d0c67a0c 100644 (file)
@@ -535,8 +535,7 @@ void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
 
 static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
 {
-       if (etnaviv_obj->vaddr)
-               vunmap(etnaviv_obj->vaddr);
+       vunmap(etnaviv_obj->vaddr);
        put_pages(etnaviv_obj);
 }
 
@@ -670,9 +669,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
        return obj;
 
 fail:
-       if (obj)
-               drm_gem_object_unreference_unlocked(obj);
-
+       drm_gem_object_unreference_unlocked(obj);
        return ERR_PTR(ret);
 }
 
@@ -916,15 +913,12 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
        get_task_struct(current);
 
        ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
-       if (ret) {
-               drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
-               return ret;
-       }
+       if (ret)
+               goto unreference;
 
        ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
-
+unreference:
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
-
        return ret;
 }
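The etnaviv rewrite funnels success and failure through a single unreference because after drm_gem_handle_create() the handle owns its own reference, so the allocation reference must be dropped either way. A toy refcount model of that hand-off (all names hypothetical):

    #include <errno.h>
    #include <stdlib.h>

    struct obj { int refs; };

    static struct obj *obj_new(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        if (o)
            o->refs = 1;    /* the allocation reference */
        return o;
    }

    static void obj_put(struct obj *o)
    {
        if (o && --o->refs == 0)
            free(o);
    }

    /* Toy drm_gem_handle_create(): on success the handle takes its own
     * reference to the object. */
    static int handle_create(struct obj *o)
    {
        o->refs++;
        return 0;
    }

    int new_userptr(void)
    {
        struct obj *o = obj_new();
        int ret;

        if (!o)
            return -ENOMEM;

        ret = handle_create(o);
        /* Success or failure, the allocation reference is dropped here:
         * the handle (if created) now holds its own. */
        obj_put(o);
        return ret;
    }

    int main(void)
    {
        return new_userptr();   /* the toy keeps the handle's ref alive */
    }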
index 67dcd6831291fac5f96f26d29ecd9dae8476d8e9..fb49443bfd323be91c91993ad83ef7e053b568d8 100644 (file)
@@ -269,8 +269,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
        struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
        struct drm_framebuffer *fb;
 
-       if (exynos_gem->kvaddr)
-               vunmap(exynos_gem->kvaddr);
+       vunmap(exynos_gem->kvaddr);
 
        /* release drm framebuffer and real buffer */
        if (fb_helper->fb && fb_helper->fb->funcs) {
index 1625d7c8a3196571a43f70cd2cd3eb210e38c1fc..2275efe41acd3b73af9bb80bc377666039740fce 100644 (file)
@@ -1820,6 +1820,7 @@ static int hdmi_probe(struct platform_device *pdev)
                DRM_ERROR("Failed to find ddc node in device tree\n");
                return -ENODEV;
        }
+       of_node_put(dev->of_node);
 
 out_get_ddc_adpt:
        hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
@@ -1838,6 +1839,7 @@ out_get_ddc_adpt:
                ret = -ENODEV;
                goto err_ddc;
        }
+       of_node_put(dev->of_node);
 
 out_get_phy_port:
        if (hdata->drv_data->is_apb_phy) {
index b9c714de6e40cdfd2dbb48b0e5a5311076a286ca..14a72c4c496d2469eccee871f22eb4ccb52eb17a 100644 (file)
@@ -5,12 +5,7 @@ config DRM_FSL_DCU
        select BACKLIGHT_LCD_SUPPORT
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_PANEL
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_SYS_FOPS
        select REGMAP_MMIO
        select VIDEOMODE_HELPERS
        help
index 17f928ec84ea77790e4fef756bee55757f42d8e1..8906d67494fc47d165ec33714ffa62c5b29294d5 100644 (file)
@@ -1,11 +1,7 @@
 config DRM_GMA500
        tristate "Intel GMA5/600 KMS Framebuffer"
        depends on DRM && PCI && X86
-       select FB_CFB_COPYAREA
-       select FB_CFB_FILLRECT
-       select FB_CFB_IMAGEBLIT
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_TTM
        # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
        select ACPI_VIDEO if ACPI
index 28f9d90988ff9164f4a2830c5b621446c821e6db..563f193fcfacf972cbb40499c97e295288a00f9f 100644 (file)
@@ -246,8 +246,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
 {
        struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
 
-       if (gma_encoder->i2c_bus)
-               psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+       psb_intel_i2c_destroy(gma_encoder->i2c_bus);
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
index 813ef23a805466299a096dbdf8ccb414a22265f6..38dc890831483c4aef716f33c6b33453c2cc1bef 100644 (file)
@@ -444,8 +444,7 @@ static void cdv_intel_lvds_destroy(struct drm_connector *connector)
 {
        struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
 
-       if (gma_encoder->i2c_bus)
-               psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+       psb_intel_i2c_destroy(gma_encoder->i2c_bus);
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
@@ -780,12 +779,10 @@ out:
 failed_find:
        mutex_unlock(&dev->mode_config.mutex);
        printk(KERN_ERR "Failed find\n");
-       if (gma_encoder->ddc_bus)
-               psb_intel_i2c_destroy(gma_encoder->ddc_bus);
+       psb_intel_i2c_destroy(gma_encoder->ddc_bus);
 failed_ddc:
        printk(KERN_ERR "Failed DDC\n");
-       if (gma_encoder->i2c_bus)
-               psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+       psb_intel_i2c_destroy(gma_encoder->i2c_bus);
 failed_blc_i2c:
        printk(KERN_ERR "Failed BLC\n");
        drm_encoder_cleanup(encoder);
index 7440bf90ac9c397f4fe888e9b9d68804bd39e8a2..0fcdce0817de2b6a65a7d41bbe88b4349a66fe1c 100644 (file)
@@ -184,12 +184,6 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
        return 0;
 }
 
-static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
-                                               unsigned long arg)
-{
-       return -ENOTTY;
-}
-
 static struct fb_ops psbfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
@@ -201,7 +195,6 @@ static struct fb_ops psbfb_ops = {
        .fb_imageblit = drm_fb_helper_cfb_imageblit,
        .fb_mmap = psbfb_mmap,
        .fb_sync = psbfb_sync,
-       .fb_ioctl = psbfb_ioctl,
 };
 
 static struct fb_ops psbfb_roll_ops = {
@@ -215,7 +208,6 @@ static struct fb_ops psbfb_roll_ops = {
        .fb_imageblit = drm_fb_helper_cfb_imageblit,
        .fb_pan_display = psbfb_pan,
        .fb_mmap = psbfb_mmap,
-       .fb_ioctl = psbfb_ioctl,
 };
 
 static struct fb_ops psbfb_unaccel_ops = {
@@ -228,7 +220,6 @@ static struct fb_ops psbfb_unaccel_ops = {
        .fb_copyarea = drm_fb_helper_cfb_copyarea,
        .fb_imageblit = drm_fb_helper_cfb_imageblit,
        .fb_mmap = psbfb_mmap,
-       .fb_ioctl = psbfb_ioctl,
 };
 
 /**
index 82b8ce418b276bae50b94c75f1b2b8f40af6b417..50eb944fb78a8e80fe81df7f1dc291f14e16e0f8 100644 (file)
@@ -210,10 +210,8 @@ static int psb_driver_unload(struct drm_device *dev)
                        iounmap(dev_priv->aux_reg);
                        dev_priv->aux_reg = NULL;
                }
-               if (dev_priv->aux_pdev)
-                       pci_dev_put(dev_priv->aux_pdev);
-               if (dev_priv->lpc_pdev)
-                       pci_dev_put(dev_priv->lpc_pdev);
+               pci_dev_put(dev_priv->aux_pdev);
+               pci_dev_put(dev_priv->lpc_pdev);
 
                /* Destroy VBT data */
                psb_intel_destroy_bios(dev);
index b1b93317d054f83c69e1b9c176987d899c24beff..e55733ca46d2a51be77dcaa6ca0b9de39cb632c4 100644 (file)
@@ -561,8 +561,7 @@ void psb_intel_lvds_destroy(struct drm_connector *connector)
        struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
        struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
 
-       if (lvds_priv->ddc_bus)
-               psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+       psb_intel_i2c_destroy(lvds_priv->ddc_bus);
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
@@ -835,11 +834,9 @@ out:
 
 failed_find:
        mutex_unlock(&dev->mode_config.mutex);
-       if (lvds_priv->ddc_bus)
-               psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+       psb_intel_i2c_destroy(lvds_priv->ddc_bus);
 failed_ddc:
-       if (lvds_priv->i2c_bus)
-               psb_intel_i2c_destroy(lvds_priv->i2c_bus);
+       psb_intel_i2c_destroy(lvds_priv->i2c_bus);
 failed_blc_i2c:
        drm_encoder_cleanup(encoder);
        drm_connector_cleanup(connector);
index b9a811750ca855f3ab9331c8abf6a88d1b9d8882..95ddd56b89f08954a4c767e149dad2d427a5de27 100644 (file)
@@ -2413,6 +2413,9 @@ static int intel_runtime_suspend(struct device *device)
 
        assert_forcewakes_inactive(dev_priv);
 
+       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+               intel_hpd_poll_init(dev_priv);
+
        DRM_DEBUG_KMS("Device suspended\n");
        return 0;
 }
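The new guard in intel_runtime_suspend() only distinguishes platforms when the negated checks are joined with &&: no device is both Valleyview and Cherryview, so an || form would be a tautology and fire everywhere, while those two platforms get their polling from the power-well teardown later in this series instead. A two-line truth-table check:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        /* A device is at most one of the two platforms, so the pair
         * (is_vlv, is_chv) is never (true, true). */
        bool is_vlv = false, is_chv = true;

        printf("||: %d   &&: %d\n",
               !is_vlv || !is_chv,   /* 1 for every platform: a tautology */
               !is_vlv && !is_chv);  /* 0 here: only non-VLV/CHV pass */
        return 0;
    }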
index 03e1bfaa5a41da5326d41b24f12b58cf0ddb1340..915a3d0acff31fbf6b6044de4bde7a45174107ce 100644 (file)
@@ -284,6 +284,9 @@ struct i915_hotplug {
        u32 short_port_mask;
        struct work_struct dig_port_work;
 
+       struct work_struct poll_init_work;
+       bool poll_enabled;
+
        /*
         * if we get a HPD irq from DP and a HPD irq from non-DP
         * the non-DP HPD could block the workqueue on a mode config
@@ -2743,6 +2746,8 @@ struct drm_i915_cmd_table {
 #define SKL_REVID_D0           0x3
 #define SKL_REVID_E0           0x4
 #define SKL_REVID_F0           0x5
+#define SKL_REVID_G0           0x6
+#define SKL_REVID_H0           0x7
 
 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
 
@@ -2957,6 +2962,8 @@ void intel_hpd_init(struct drm_i915_private *dev_priv);
 void intel_hpd_init_work(struct drm_i915_private *dev_priv);
 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
 
 /* i915_irq.c */
 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
index 8f50919ba9b40e59b2db9c550abc29dd0dbb0356..7fd44980798fd048714e927e8a305b94795175a0 100644 (file)
@@ -1501,15 +1501,6 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                        break;
                }
 
-               /* Ensure that even if the GPU hangs, we get woken up.
-                *
-                * However, note that if no one is waiting, we never notice
-                * a gpu hang. Eventually, we will have to wait for a resource
-                * held by the GPU and so trigger a hangcheck. In the most
-                * pathological case, this will be upon memory starvation!
-                */
-               i915_queue_hangcheck(req->i915);
-
                timeout_remain = io_schedule_timeout(timeout_remain);
                if (timeout_remain == 0) {
                        ret = -ETIME;
index 067632ad2f293e9d8655507d3135c9557cfd6aa7..6f10b421487b843cc03f0defe666c06ef060fe81 100644 (file)
@@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
        if (!mutex_is_locked(mutex))
                return false;
 
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
        return mutex->owner == task;
 #else
        /* Since UP may be pre-empted, we cannot assume that we own the lock */
index 8b13bfa47fbaaf2be040bfcd6a420ef8aaca0e1b..b6e404c91eed5827f6c8692a57a32f374ee350b9 100644 (file)
@@ -54,8 +54,8 @@ struct i915_params i915 __read_mostly = {
        .verbose_state_checks = 1,
        .nuclear_pageflip = 0,
        .edp_vswing = 0,
-       .enable_guc_loading = -1,
-       .enable_guc_submission = -1,
+       .enable_guc_loading = 0,
+       .enable_guc_submission = 0,
        .guc_log_level = -1,
        .enable_dp_mst = true,
        .inject_load_failure = 0,
@@ -203,12 +203,12 @@ MODULE_PARM_DESC(edp_vswing,
 module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
 MODULE_PARM_DESC(enable_guc_loading,
                "Enable GuC firmware loading "
-               "(-1=auto [default], 0=never, 1=if available, 2=required)");
+               "(-1=auto, 0=never [default], 1=if available, 2=required)");
 
 module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
 MODULE_PARM_DESC(enable_guc_submission,
                "Enable GuC submission "
-               "(-1=auto [default], 0=never, 1=if available, 2=required)");
+               "(-1=auto, 0=never [default], 1=if available, 2=required)");
 
 module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
 MODULE_PARM_DESC(guc_log_level,
index 8bfde75789f60dada455a59ee69a3727d2c91e45..ce14fe09d96236a32af675b5320aaefb9d508f8c 100644 (file)
@@ -1686,6 +1686,9 @@ enum skl_disp_power_wells {
 
 #define GEN7_TLB_RD_ADDR       _MMIO(0x4700)
 
+#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
+#define   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS  (1<<18)
+
 #define GAMT_CHKN_BIT_REG      _MMIO(0x4ab8)
 #define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING     (1<<28)
 
index d89b2c963618298f5e8f3b23bfdddaca46473f5a..b074f3d6d127fded56cdc49ea9326ced2407d51a 100644 (file)
@@ -93,6 +93,15 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
        if (!b->irq_enabled ||
            test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
                mod_timer(&b->fake_irq, jiffies + 1);
+
+       /* Ensure that even if the GPU hangs, we get woken up.
+        *
+        * However, note that if no one is waiting, we never notice
+        * a gpu hang. Eventually, we will have to wait for a resource
+        * held by the GPU and so trigger a hangcheck. In the most
+        * pathological case, this will be upon memory starvation!
+        */
+       i915_queue_hangcheck(i915);
 }
 
 static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
index 5819d524d9179dc225e698a8d1d59da558c2aea5..827b6ef4e9aedfa669c91942d71f84e800841c38 100644 (file)
@@ -329,10 +329,25 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
        struct drm_device *dev = connector->dev;
        struct intel_crt *crt = intel_attached_crt(connector);
        struct drm_i915_private *dev_priv = to_i915(dev);
+       bool reenable_hpd;
        u32 adpa;
        bool ret;
        u32 save_adpa;
 
+       /*
+        * Doing a force trigger causes a hpd interrupt to get sent, which can
+        * get us stuck in a loop if we're polling:
+        *  - We enable power wells and reset the ADPA
+        *  - output_poll_exec does force probe on VGA, triggering a hpd
+        *  - HPD handler waits for poll to unlock dev->mode_config.mutex
+        *  - output_poll_exec shuts off the ADPA, unlocks
+        *    dev->mode_config.mutex
+        *  - HPD handler runs, resets ADPA and brings us back to the start
+        *
+        * Just disable HPD interrupts here to prevent this
+        */
+       reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
+
        save_adpa = adpa = I915_READ(crt->adpa_reg);
        DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
 
@@ -357,6 +372,9 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 
        DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
 
+       if (reenable_hpd)
+               intel_hpd_enable(dev_priv, crt->base.hpd_pin);
+
        return ret;
 }
 
@@ -717,11 +735,11 @@ static int intel_crt_set_property(struct drm_connector *connector,
        return 0;
 }
 
-static void intel_crt_reset(struct drm_connector *connector)
+void intel_crt_reset(struct drm_encoder *encoder)
 {
-       struct drm_device *dev = connector->dev;
+       struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crt *crt = intel_attached_crt(connector);
+       struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
 
        if (INTEL_INFO(dev)->gen >= 5) {
                u32 adpa;
@@ -743,7 +761,6 @@ static void intel_crt_reset(struct drm_connector *connector)
  */
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
-       .reset = intel_crt_reset,
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_crt_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
@@ -762,6 +779,7 @@ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs
 };
 
 static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+       .reset = intel_crt_reset,
        .destroy = intel_encoder_destroy,
 };
 
@@ -904,5 +922,5 @@ void intel_crt_init(struct drm_device *dev)
                dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
        }
 
-       intel_crt_reset(connector);
+       intel_crt_reset(&crt->base.base);
 }
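The force-trigger probe above brackets itself with intel_hpd_disable()/intel_hpd_enable(); the bool return records whether this call actually did the disabling, so a pin that storm detection had already turned off is not re-enabled behind its back. The bracket pattern in miniature:

    #include <stdbool.h>
    #include <stdio.h>

    static bool hpd_enabled = true;

    /* Disable reports whether *we* did the disabling, so callers never
     * re-enable a pin that someone else had already turned off. */
    static bool hpd_disable(void)
    {
        if (!hpd_enabled)
            return false;
        hpd_enabled = false;
        return true;
    }

    static void hpd_enable(void) { hpd_enabled = true; }

    static void force_probe(void)
    {
        bool reenable = hpd_disable();

        printf("probing with HPD off\n");   /* no interrupt feedback loop */

        if (reenable)
            hpd_enable();
    }

    int main(void) { force_probe(); return 0; }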
index c3b33a10c15cd8a6f74bcec4b7d442b786cce969..3edb9580928e5b30a35d855a87f6bcd66158b454 100644 (file)
  * be moved to FW_FAILED.
  */
 
-#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
 MODULE_FIRMWARE(I915_CSR_KBL);
 #define KBL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 1)
 
-#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
 MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 26)
+#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 23)
 
-#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
 MODULE_FIRMWARE(I915_CSR_BXT);
 #define BXT_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
 
index be3b2cab2640e5c83232cf841b3b9ae70459b742..c457eed76f1f7a1fca441d4714c2aebf95534d05 100644 (file)
@@ -12016,6 +12016,12 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                ret = intel_color_check(crtc, crtc_state);
                if (ret)
                        return ret;
+
+               /*
+                * Changing color management on Intel hardware is
+                * handled as part of planes update.
+                */
+               crtc_state->planes_changed = true;
        }
 
        ret = 0;
@@ -13918,8 +13924,50 @@ out:
 
 #undef for_each_intel_crtc_masked
 
+/*
+ * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
+ *        drm_atomic_helper_legacy_gamma_set() directly.
+ */
+static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
+                                        u16 *red, u16 *green, u16 *blue,
+                                        uint32_t size)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_crtc_state *state;
+       int ret;
+
+       ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
+       if (ret)
+               return ret;
+
+       /*
+        * Make sure we update the legacy properties so this works when
+        * atomic is not enabled.
+        */
+
+       state = crtc->state;
+
+       drm_object_property_set_value(&crtc->base,
+                                     config->degamma_lut_property,
+                                     (state->degamma_lut) ?
+                                     state->degamma_lut->base.id : 0);
+
+       drm_object_property_set_value(&crtc->base,
+                                     config->ctm_property,
+                                     (state->ctm) ?
+                                     state->ctm->base.id : 0);
+
+       drm_object_property_set_value(&crtc->base,
+                                     config->gamma_lut_property,
+                                     (state->gamma_lut) ?
+                                     state->gamma_lut->base.id : 0);
+
+       return 0;
+}
+
 static const struct drm_crtc_funcs intel_crtc_funcs = {
-       .gamma_set = drm_atomic_helper_legacy_gamma_set,
+       .gamma_set = intel_atomic_legacy_gamma_set,
        .set_config = drm_atomic_helper_set_config,
        .set_property = drm_atomic_helper_crtc_set_property,
        .destroy = intel_crtc_destroy,
index 0c5ba3410a1e7417320f4c814a25ade303e4cd17..21b04c3eda41bf6f7685b6080b9e4a5228f3a116 100644 (file)
@@ -4336,7 +4336,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 
        intel_dp->detect_done = false;
 
-       if (intel_connector->detect_edid)
+       if (is_edp(intel_dp) || intel_connector->detect_edid)
                return connector_status_connected;
        else
                return connector_status_disconnected;
index 55aeaf0417498bcfc42ca2b15d56964516b449c4..3329fc6a95f488b9e74b8f7a6deea457976a92b6 100644 (file)
@@ -1102,7 +1102,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
 
 /* intel_crt.c */
 void intel_crt_init(struct drm_device *dev);
-
+void intel_crt_reset(struct drm_encoder *encoder);
 
 /* intel_ddi.c */
 void intel_ddi_clk_select(struct intel_encoder *encoder,
@@ -1425,6 +1425,8 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
 
 /* intel_dvo.c */
 void intel_dvo_init(struct drm_device *dev);
+/* intel_hotplug.c */
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
 
 
 /* legacy fbdev emulation in intel_fbdev.c */
index 51434ec871f217b637de8bb197190cee7aee6af1..f48957ea100d9283b53d02f7aef82ae5cacbb2e9 100644 (file)
@@ -452,20 +452,47 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
  *
  * This is a separate step from interrupt enabling to simplify the locking rules
  * in the driver load and resume code.
+ *
+ * Also see: intel_hpd_poll_init(), which enables connector polling
  */
 void intel_hpd_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct drm_mode_config *mode_config = &dev->mode_config;
-       struct drm_connector *connector;
        int i;
 
        for_each_hpd_pin(i) {
                dev_priv->hotplug.stats[i].count = 0;
                dev_priv->hotplug.stats[i].state = HPD_ENABLED;
        }
+
+       WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
+       schedule_work(&dev_priv->hotplug.poll_init_work);
+
+       /*
+        * Interrupt setup is already guaranteed to be single-threaded, this is
+        * just to make the assert_spin_locked checks happy.
+        */
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->display.hpd_irq_setup)
+               dev_priv->display.hpd_irq_setup(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void i915_hpd_poll_init_work(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, struct drm_i915_private,
+                            hotplug.poll_init_work);
+       struct drm_device *dev = &dev_priv->drm;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *connector;
+       bool enabled;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
+
        list_for_each_entry(connector, &mode_config->connector_list, head) {
-               struct intel_connector *intel_connector = to_intel_connector(connector);
+               struct intel_connector *intel_connector =
+                       to_intel_connector(connector);
                connector->polled = intel_connector->polled;
 
                /* MST has a dynamic intel_connector->encoder and it's reprobing
@@ -474,24 +501,62 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
                        continue;
 
                if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
-                   intel_connector->encoder->hpd_pin > HPD_NONE)
-                       connector->polled = DRM_CONNECTOR_POLL_HPD;
+                   intel_connector->encoder->hpd_pin > HPD_NONE) {
+                       connector->polled = enabled ?
+                               DRM_CONNECTOR_POLL_CONNECT |
+                               DRM_CONNECTOR_POLL_DISCONNECT :
+                               DRM_CONNECTOR_POLL_HPD;
+               }
        }
 
+       if (enabled)
+               drm_kms_helper_poll_enable_locked(dev);
+
+       mutex_unlock(&dev->mode_config.mutex);
+
        /*
-        * Interrupt setup is already guaranteed to be single-threaded, this is
-        * just to make the assert_spin_locked checks happy.
+        * We might have missed hotplug events that happened while we were
+        * in the middle of disabling polling.
         */
-       spin_lock_irq(&dev_priv->irq_lock);
-       if (dev_priv->display.hpd_irq_setup)
-               dev_priv->display.hpd_irq_setup(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
+       if (!enabled)
+               drm_helper_hpd_irq_event(dev);
+}
+
+/**
+ * intel_hpd_poll_init - enable polling for connectors with hpd
+ * @dev_priv: i915 device instance
+ *
+ * This function enables polling for all connectors, regardless of whether or
+ * not they support hotplug detection. Under certain conditions HPD may not be
+ * functional. On most Intel GPUs, this happens when we enter runtime suspend.
+ * On Valleyview and Cherryview systems, this also happens when we shut off all
+ * of the powerwells.
+ *
+ * Since this function can get called in contexts where we're already holding
+ * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
+ * worker.
+ *
+ * Also see: intel_hpd_init(), which restores hpd handling.
+ */
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
+{
+       WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
+
+       /*
+        * We might already be holding dev->mode_config.mutex, so do this in a
+        * separate worker.
+        * There's also no issue if we race here, since we always reschedule
+        * this worker anyway.
+        */
+       schedule_work(&dev_priv->hotplug.poll_init_work);
 }
 
 void intel_hpd_init_work(struct drm_i915_private *dev_priv)
 {
        INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+       INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
        INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
                          intel_hpd_irq_storm_reenable_work);
 }
@@ -508,5 +573,33 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
 
        cancel_work_sync(&dev_priv->hotplug.dig_port_work);
        cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+       cancel_work_sync(&dev_priv->hotplug.poll_init_work);
        cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
 }
+
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+       bool ret = false;
+
+       if (pin == HPD_NONE)
+               return false;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
+               dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+               ret = true;
+       }
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       return ret;
+}
+
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+       if (pin == HPD_NONE)
+               return;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+       spin_unlock_irq(&dev_priv->irq_lock);
+}
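intel_hpd_poll_init() and intel_hpd_init() never take dev->mode_config.mutex themselves: they publish poll_enabled with WRITE_ONCE() and let poll_init_work take the mutex, which is what makes them safe to call while that mutex is already held; racing callers just reschedule the work. A pthread sketch of the publish-and-defer shape (names hypothetical; the join exists only to keep the demo alive):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t cfg_mutex = PTHREAD_MUTEX_INITIALIZER;
    static atomic_bool poll_enabled;

    /* The "worker": the only place that takes cfg_mutex, so a requester
     * already holding that mutex can never deadlock against itself. */
    static void *poll_init_work(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&cfg_mutex);
        printf("polling %s\n",
               atomic_load(&poll_enabled) ? "enabled" : "disabled");
        pthread_mutex_unlock(&cfg_mutex);
        return NULL;
    }

    /* May be called with cfg_mutex held: publish the flag and defer the
     * mutex-taking part.  Races are harmless, since the worker always
     * reads the latest value. */
    static void request_poll(bool enable)
    {
        pthread_t t;

        atomic_store(&poll_enabled, enable);
        pthread_create(&t, NULL, poll_init_work, NULL);
        pthread_join(&t, NULL);     /* demo only */
    }

    int main(void)
    {
        request_poll(true);
        return 0;
    }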
index 70c699043d0ef3029903f0aa158b12885e33ed7e..414ddda4392255b1d5479c66b4d75f86235ea2e8 100644 (file)
@@ -1097,6 +1097,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
                                                uint32_t *const batch,
                                                uint32_t index)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
 
        /*
@@ -1105,8 +1106,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
         * this batch updates GEN8_L3SQCREG4 with default value we need to
         * set this bit here to retain the WA during flush.
         */
-       if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0) ||
-           IS_KBL_REVID(engine->i915, 0, KBL_REVID_E0))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
+           IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
                l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
        wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1267,11 +1268,12 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
                                    uint32_t *offset)
 {
        int ret;
+       struct drm_i915_private *dev_priv = engine->i915;
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
        /* WaDisableCtxRestoreArbitration:skl,bxt */
-       if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
-           IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1282,7 +1284,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
 
        /* WaClearSlmSpaceAtContextSwitch:kbl */
        /* Actual scratch location is at 128 bytes offset */
-       if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
                uint32_t scratch_addr
                        = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
 
index 3c1482b8f2f44a2f187b0af692520cc1d0fb7a7e..927825f5b28447f95fd021ca5af9c62f95eda4a3 100644 (file)
@@ -66,9 +66,10 @@ struct drm_i915_mocs_table {
 #define L3_WB                  3
 
 /* Target cache */
-#define ELLC                   0
-#define LLC                    1
-#define LLC_ELLC               2
+#define LE_TC_PAGETABLE                0
+#define LE_TC_LLC              1
+#define LE_TC_LLC_ELLC         2
+#define LE_TC_LLC_ELLC_ALT     3
 
 /*
  * MOCS tables
@@ -96,34 +97,67 @@ struct drm_i915_mocs_table {
  *       end.
  */
 static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
-       /* { 0x00000009, 0x0010 } */
-       { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
-          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
-       /* { 0x00000038, 0x0030 } */
-       { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
-          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
-       /* { 0x0000003b, 0x0030 } */
-       { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
-          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+       { /* 0x00000009 */
+         .control_value = LE_CACHEABILITY(LE_UC) |
+                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+                          LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+                          LE_PFM(0) | LE_SCF(0),
+
+         /* 0x0010 */
+         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
+       },
+       {
+         /* 0x00000038 */
+         .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
+                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+                          LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+                          LE_PFM(0) | LE_SCF(0),
+         /* 0x0030 */
+         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+       },
+       {
+         /* 0x0000003b */
+         .control_value = LE_CACHEABILITY(LE_WB) |
+                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+                          LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+                          LE_PFM(0) | LE_SCF(0),
+         /* 0x0030 */
+         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+       },
 };
 
 /* NOTE: the LE_TGT_CACHE is not used on Broxton */
 static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
-       /* { 0x00000009, 0x0010 } */
-       { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
-          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
-       /* { 0x00000038, 0x0030 } */
-       { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
-          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
-       /* { 0x0000003b, 0x0030 } */
-       { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
-          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+       {
+         /* 0x00000009 */
+         .control_value = LE_CACHEABILITY(LE_UC) |
+                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+                          LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+                          LE_PFM(0) | LE_SCF(0),
+
+         /* 0x0010 */
+         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
+       },
+       {
+         /* 0x00000038 */
+         .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
+                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+                          LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+                          LE_PFM(0) | LE_SCF(0),
+
+         /* 0x0030 */
+         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+       },
+       {
+         /* 0x00000039 */
+         .control_value = LE_CACHEABILITY(LE_UC) |
+                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+                          LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+                          LE_PFM(0) | LE_SCF(0),
+
+         /* 0x0030 */
+         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+       },
 };
 
 /**
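The MOCS tables above move to designated initializers, naming .control_value and .l3cc_value per entry instead of relying on brace position, so the raw register values in the comments line up with a labeled field and entries cannot silently shift if the struct layout changes. The same style on a two-field toy table, using the values quoted in the hunk:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mocs_entry {
        uint32_t control_value;
        uint16_t l3cc_value;
    };

    /* Designated initializers name every field, so reordering the
     * struct members cannot misassign table columns. */
    static const struct mocs_entry table[] = {
        { .control_value = 0x00000009, .l3cc_value = 0x0010 },
        { .control_value = 0x00000038, .l3cc_value = 0x0030 },
        { .control_value = 0x0000003b, .l3cc_value = 0x0030 },
    };

    int main(void)
    {
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            printf("entry %zu: ctrl=0x%08x l3cc=0x%04x\n", i,
                   (unsigned)table[i].control_value,
                   (unsigned)table[i].l3cc_value);
        return 0;
    }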
index c27d5eb063d0b21f8774cd786c7c40151b40471b..adca262d591ac02c46577ee05fa6741741433d41 100644 (file)
@@ -1072,5 +1072,16 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
                return -ENODEV;
        }
 
+       /*
+        * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
+        * low vswing for eDP, whereas the VBT panel type (2) gives us normal
+        * vswing instead. Low vswing results in some display flickers, so
+        * let's simply ignore the OpRegion panel type on SKL for now.
+        */
+       if (IS_SKYLAKE(dev_priv)) {
+               DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+               return -ENODEV;
+       }
+
        return ret - 1;
 }
index 5a8ee0c76593993345e1951581c9304d0cba54a6..f4f3fcc8b3becb59c0ed5d6bdd5dd27a177f3731 100644 (file)
@@ -57,7 +57,7 @@
 
 static void gen9_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
        I915_WRITE(CHICKEN_PAR1_1,
@@ -7046,7 +7046,7 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
 
 static void kabylake_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        gen9_init_clock_gating(dev);
 
@@ -7067,7 +7067,7 @@ static void kabylake_init_clock_gating(struct drm_device *dev)
 
 static void skylake_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        gen9_init_clock_gating(dev);
 
index 61e00bf9e87f928f672ebe9c35243887725d82f4..cca7792f26d5058c99edfee0d99c76d7706b0ebe 100644 (file)
@@ -1109,6 +1109,11 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
        /* WaDisableGafsUnitClkGating:skl */
        WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
+       /* WaInPlaceDecompressionHang:skl */
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
+               WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+                          GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
        /* WaDisableLSQCROPERFforOCL:skl */
        ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
        if (ret)
@@ -1178,6 +1183,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                                  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
+       /* WaInPlaceDecompressionHang:bxt */
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+               WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+                          GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
        return 0;
 }
 
@@ -1225,6 +1235,10 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
                GEN7_HALF_SLICE_CHICKEN1,
                GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
+       /* WaInPlaceDecompressionHang:kbl */
+       WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+                  GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
        /* WaDisableLSQCROPERFforOCL:kbl */
        ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
        if (ret)
@@ -1305,7 +1319,8 @@ static int init_render_ring(struct intel_engine_cs *engine)
        if (IS_GEN(dev_priv, 6, 7))
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-       I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+       if (INTEL_INFO(dev_priv)->gen >= 6)
+               I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 
        return init_workarounds_ring(engine);
 }
index 6b78295f53dbf4e290ee16d6154c2a848b7d7518..1c603bbe5784fa7e21049e267eab52af25a60360 100644 (file)
@@ -1078,6 +1078,7 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
 
 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 {
+       struct intel_encoder *encoder;
        enum pipe pipe;
 
        /*
@@ -1113,6 +1114,12 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 
        intel_hpd_init(dev_priv);
 
+       /* Re-enable the ADPA, if we have one */
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               if (encoder->type == INTEL_OUTPUT_ANALOG)
+                       intel_crt_reset(&encoder->base);
+       }
+
        i915_redisable_vga_power_on(&dev_priv->drm);
 }
 
@@ -1126,6 +1133,8 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
        synchronize_irq(dev_priv->drm.irq);
 
        intel_power_sequencer_reset(dev_priv);
+
+       intel_hpd_poll_init(dev_priv);
 }
 
 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
index a1844b50546c1529cb1ec653553a74c2a2afc059..f2c9ae822149c715bfb99a18f4a1218935cf2d8c 100644 (file)
@@ -1,7 +1,6 @@
 config DRM_IMX
        tristate "DRM Support for Freescale i.MX"
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select VIDEOMODE_HELPERS
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_CMA_HELPER
index 5d2831dfb8b9842acff6dd194337fee8bacee1f7..b03919ed60bade2f2051aeb4fc41389f64843608 100644 (file)
@@ -297,15 +297,12 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
        }
 
        if (!bus_format) {
-               struct drm_connector_state *conn_state;
                struct drm_connector *connector;
-               int i;
 
-               for_each_connector_in_state(encoder->crtc->state->state,
-                                           connector, conn_state, i) {
+               drm_for_each_connector(connector, encoder->dev) {
                        struct drm_display_info *di = &connector->display_info;
 
-                       if (conn_state->crtc == encoder->crtc &&
+                       if (connector->encoder == encoder &&
                            di->num_bus_formats) {
                                bus_format = di->bus_formats[0];
                                break;
index 3a1c5fbae54a5486252e91dd4ae1c0f9fdf95a59..520e5e668d6cefd1a6204c9b7626704ccf5f27bf 100644 (file)
@@ -1,11 +1,7 @@
 config DRM_MGAG200
        tristate "Kernel modesetting driver for MGA G200 server engines"
        depends on DRM && PCI
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_TTM
        help
         This is a KMS driver for the MGA G200 server chips, it
index 615cbb08ba29ecdfbe8a4bfa0a02b9ce86804216..13798b3e6bebbf0b8a62b39c60b9494659d5264d 100644 (file)
@@ -17,8 +17,8 @@
 static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
-       if (mga_fb->obj)
-               drm_gem_object_unreference_unlocked(mga_fb->obj);
+
+       drm_gem_object_unreference_unlocked(mga_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(fb);
 }
index 5ab13e7939db7d0085aecd41ac18be7a8ee5782f..2922a82cba8e4dd8e872ae7aae383279a32366b2 100644 (file)
@@ -3,13 +3,7 @@ config DRM_NOUVEAU
        depends on DRM && PCI
         select FW_LOADER
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_TTM
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
-       select FB
-       select FRAMEBUFFER_CONSOLE if !EXPERT
        select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
        select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
        select X86_PLATFORM_DEVICES if ACPI && X86
index 22706c0a54b53a9f95a1ad7e453217b7276cfcaf..49bd5da194e1b368b0d069825e6fb03b03166684 100644 (file)
@@ -40,7 +40,8 @@ static int
 gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
        struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-       nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
+       const u32 soff = gf119_sor_soff(outp);
+       nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
        return 0;
 }
 
index 336ad4de9981f70755a54b69dc6f8a12161d7002..556f81f6b2c7ddd67a0587d9e13c1ea883262497 100644 (file)
@@ -4,11 +4,6 @@ config DRM_OMAP
        depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
        select OMAP2_DSS
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_SYS_FOPS
        default n
        help
          DRM display driver for OMAP2/3/4 based boards.
index 75f7827525cf8e17b7a9bb643dd72dc532ac5b0f..684b7aeda411a859299930d174e953450df58775 100644 (file)
@@ -255,6 +255,7 @@ static int dvic_probe_of(struct platform_device *pdev)
        adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
        if (adapter_node) {
                adapter = of_get_i2c_adapter_by_node(adapter_node);
+               of_node_put(adapter_node);
                if (adapter == NULL) {
                        dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
                        omap_dss_put_device(ddata->in);
index 1b0cf2d8224b3ade7cf2aeb124db6e58c5faebf4..0eae8afaed90bb08fe49bf32f3559bb0bec739db 100644 (file)
@@ -1284,8 +1284,7 @@ static int dsicm_probe(struct platform_device *pdev)
        return 0;
 
 err_sysfs_create:
-       if (bldev != NULL)
-               backlight_device_unregister(bldev);
+       backlight_device_unregister(bldev);
 err_bl:
        destroy_workqueue(ddata->workqueue);
 err_reg:
index dfd4e9621e3b1c969b76ac6873a480a163d98959..e256d879b25ccc379cf93201b155f31e00269af2 100644 (file)
@@ -125,15 +125,16 @@ u32 dss_of_port_get_port_number(struct device_node *port)
 
 static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
 {
-       struct device_node *np;
+       struct device_node *np, *np_parent;
 
        np = of_parse_phandle(node, "remote-endpoint", 0);
        if (!np)
                return NULL;
 
-       np = of_get_next_parent(np);
+       np_parent = of_get_next_parent(np);
+       of_node_put(np);
 
-       return np;
+       return np_parent;
 }
 
 struct device_node *
index 983c8cf2441c5fde79e1167089ccdd38964f4c38..31f5178c22c7577136da27e6903dc37391d4a8fa 100644 (file)
@@ -115,8 +115,8 @@ static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
 
        for (i = 0; i < n; i++) {
                struct plane *plane = &omap_fb->planes[i];
-               if (plane->bo)
-                       drm_gem_object_unreference_unlocked(plane->bo);
+
+               drm_gem_object_unreference_unlocked(plane->bo);
        }
 
        kfree(omap_fb);
index 38c2bb72e4560f72aac8edf0095acf99b2b171b3..da45b11b66b8b075fb3e9e34fb64d92588383470 100644 (file)
@@ -1,12 +1,7 @@
 config DRM_QXL
        tristate "QXL virtual GPU"
        depends on DRM && PCI
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_DEFERRED_IO
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_TTM
        select CRC32
        help
index ad429683fef771f2f05da51518b9f0383279d7c2..3aef12742a53b895fac7be63651643d5db9325e2 100644 (file)
@@ -468,8 +468,7 @@ void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
 
-       if (qxl_fb->obj)
-               drm_gem_object_unreference_unlocked(qxl_fb->obj);
+       drm_gem_object_unreference_unlocked(qxl_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(qxl_fb);
 }
index 587cae4e73c9abeeb65d2187b9e700bb8f0fa442..56bb758f4e332dbdf45d321306ae7875bb7a00d8 100644 (file)
@@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
                case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
                        if (dig->backlight_level == 0)
                                atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
                        else {
index 35e0fc3ae8a71d34b03f03ab175bf71db17fb45c..7ba450832e6b7a59db9499a0919a8bb5f4a3fd1a 100644 (file)
@@ -3843,7 +3843,10 @@ static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
        if (i >= sclk_table->count) {
                pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
        } else {
-               /* XXX check display min clock requirements */
+               /* XXX The current code always reprograms the sclk levels,
+                * but we don't currently handle display sclk requirements,
+                * so just skip the check here.
+                */
                if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
                        pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
        }
index 59acd0e5c2c6384705fdda0a6b89e65be5f5abc4..31c9a92d6a1b89da90863e761fa4701f15d3ad3f 100644 (file)
@@ -741,13 +741,6 @@ int radeon_acpi_init(struct radeon_device *rdev)
                }
 
                atif->encoder_for_bl = target;
-               if (!target) {
-                       /* Brightness change notification is enabled, but we
-                        * didn't find a backlight controller, this should
-                        * never happen.
-                        */
-                       DRM_ERROR("Cannot find a backlight controller\n");
-               }
        }
 
        if (atif->functions.sbios_requests && !atif->functions.system_params) {
index f8097a0e7a79176026a4f17bdd19e3b03483f445..5df3ec73021b522fddae1b8368e68058ae7c93c5 100644 (file)
@@ -1155,7 +1155,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
                    le16_to_cpu(firmware_info->info.usReferenceClock);
                p1pll->reference_div = 0;
 
-               if (crev < 2)
+               if ((frev < 2) && (crev < 2))
                        p1pll->pll_out_min =
                                le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
                else
@@ -1164,7 +1164,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
                p1pll->pll_out_max =
                    le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
 
-               if (crev >= 4) {
+               if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
                        p1pll->lcd_pll_out_min =
                                le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
                        if (p1pll->lcd_pll_out_min == 0)
index 86dcdf38b732176ec94865d213648953e344cf04..6de3428612027f5da11ccec1e1827bc828e4e341 100644 (file)
@@ -536,7 +536,6 @@ static int radeon_atpx_get_client_id(struct pci_dev *pdev)
 static const struct vga_switcheroo_handler radeon_atpx_handler = {
        .switchto = radeon_atpx_switchto,
        .power_state = radeon_atpx_power_state,
-       .init = radeon_atpx_init,
        .get_client_id = radeon_atpx_get_client_id,
 };
 
@@ -572,6 +571,7 @@ static bool radeon_atpx_detect(void)
                printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
                       acpi_method_name);
                radeon_atpx_priv.atpx_detected = true;
+               radeon_atpx_init();
                return true;
        }
        return false;
index 5f1cd695c9656e5d558fa4ed2c90203c39e7bfc0..c3206fb8f4cf773b9e378e2ad79f710d0b45d38d 100644 (file)
@@ -714,7 +714,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
 
        drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
        radeon_crtc->crtc_id = index;
-       radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc");
+       radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
        rdev->mode_info.crtcs[index] = radeon_crtc;
 
        if (rdev->family >= CHIP_BONAIRE) {
@@ -1324,9 +1324,7 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
 
-       if (radeon_fb->obj) {
-               drm_gem_object_unreference_unlocked(radeon_fb->obj);
-       }
+       drm_gem_object_unreference_unlocked(radeon_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(radeon_fb);
 }
index 7fc3ca5ce6c7365065b1c563ac05137bba1640ea..4c2fd056dd6da6c207e5ac944788cbc3ac2b3028 100644 (file)
@@ -6,7 +6,6 @@ config DRM_RCAR_DU
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_GEM_CMA_HELPER
-       select DRM_KMS_FB_HELPER
        select VIDEOMODE_HELPERS
        help
          Choose this option if you have an R-Car chipset.
index e48611e83c03a90e47f20369829715be790b4d9b..3c58669a06cea1c7d582798d9eb6249e1d3db816 100644 (file)
@@ -4,11 +4,7 @@ config DRM_ROCKCHIP
        depends on RESET_CONTROLLER
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_PANEL
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
        select VIDEOMODE_HELPERS
        help
          Choose this option if you have a Rockchip soc chipset.
index e81e19a660adcbd10736a01ab89934c4b06e0bae..89aadbf465f8563cdefb8174ff5cfca608876d09 100644 (file)
@@ -96,6 +96,7 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
        ret = rockchip_dp_pre_init(dp);
        if (ret < 0) {
                dev_err(dp->dev, "failed to dp pre init %d\n", ret);
+               clk_disable_unprepare(dp->pclk);
                return ret;
        }
 
@@ -272,6 +273,7 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
        ret = rockchip_dp_pre_init(dp);
        if (ret < 0) {
                dev_err(dp->dev, "failed to pre init %d\n", ret);
+               clk_disable_unprepare(dp->pclk);
                return ret;
        }
 
index 8d17d00ddb4b75aea7456fd1ef3dcb4f1fbe5b0e..c987c826daa30fcc066ef01cffd5e5dafc86317b 100644 (file)
@@ -6,7 +6,6 @@ config DRM_SHMOBILE
        select BACKLIGHT_CLASS_DEVICE
        select BACKLIGHT_LCD_SUPPORT
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_GEM_CMA_HELPER
        help
index bd74732ea09b0a0b97923138955913e5463bf91f..134201ecc6fd98b65f286c0dd6f1135e04aaabc6 100644 (file)
@@ -254,12 +254,12 @@ static int sti_compositor_probe(struct platform_device *pdev)
        }
 
        /* Get reset resources */
-       compo->rst_main = devm_reset_control_get(dev, "compo-main");
+       compo->rst_main = devm_reset_control_get_shared(dev, "compo-main");
        /* Take compo main out of reset */
        if (!IS_ERR(compo->rst_main))
                reset_control_deassert(compo->rst_main);
 
-       compo->rst_aux = devm_reset_control_get(dev, "compo-aux");
+       compo->rst_aux = devm_reset_control_get_shared(dev, "compo-aux");
        /* Take compo aux out of reset */
        if (!IS_ERR(compo->rst_aux))
                reset_control_deassert(compo->rst_aux);
index f628b6d8f23fd6d1738614b8df3752d6adea924e..4a192210574f5ed442d09e33babb8697eacd4bad 100644 (file)
@@ -77,6 +77,14 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc)
        DRM_DEBUG_DRIVER("Disabling the CRTC\n");
 
        sun4i_tcon_disable(drv->tcon);
+
+       if (crtc->state->event && !crtc->state->active) {
+               spin_lock_irq(&crtc->dev->event_lock);
+               drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               spin_unlock_irq(&crtc->dev->event_lock);
+
+               crtc->state->event = NULL;
+       }
 }
 
 static void sun4i_crtc_enable(struct drm_crtc *crtc)
index 5b89940edcb1c58140b30af8876cb9bef65cb7bf..7092daaf6c432b8fa8d2bb93bda63785cf307f01 100644 (file)
@@ -92,7 +92,7 @@ static struct drm_driver sun4i_drv_driver = {
        /* Frame Buffer Operations */
 
        /* VBlank Operations */
-       .get_vblank_counter     = drm_vblank_count,
+       .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = sun4i_drv_enable_vblank,
        .disable_vblank         = sun4i_drv_disable_vblank,
 };
@@ -185,7 +185,6 @@ static void sun4i_drv_unbind(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
 
-       drm_connector_unregister_all(drm);
        drm_dev_unregister(drm);
        drm_kms_helper_poll_fini(drm);
        sun4i_framebuffer_free(drm);
@@ -300,6 +299,7 @@ static int sun4i_drv_probe(struct platform_device *pdev)
 
                count += sun4i_drv_add_endpoints(&pdev->dev, &match,
                                                pipeline);
+               of_node_put(pipeline);
 
                DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
                                 count, i);
index a0b30c216a5bf5a96a2077eb10282102bfa0f5a7..70688febd7ac8b08ee064ffcaabef506e63515eb 100644 (file)
@@ -20,8 +20,7 @@ static void sun4i_de_output_poll_changed(struct drm_device *drm)
 {
        struct sun4i_drv *drv = drm->dev_private;
 
-       if (drv->fbdev)
-               drm_fbdev_cma_hotplug_event(drv->fbdev);
+       drm_fbdev_cma_hotplug_event(drv->fbdev);
 }
 
 static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
index 1b12aa7a715e2eecd64232459366b4cafa8a8bff..e6d71fa4028eab234b57973bfe318c7259244377 100644 (file)
@@ -68,7 +68,7 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
                struct tegra_bo *bo = fb->planes[i];
 
                if (bo) {
-                       if (bo->pages && bo->vaddr)
+                       if (bo->pages)
                                vunmap(bo->vaddr);
 
                        drm_gem_object_unreference_unlocked(&bo->gem);
index f60a1ec84fa4b1025891830f0b3444b0a16b9dbf..28fed7e206d030abb23eb1141a7eac672fc702a7 100644 (file)
@@ -2,7 +2,6 @@ config DRM_TILCDC
        tristate "DRM Support for TI LCDC Display Controller"
        depends on DRM && OF && ARM
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_GEM_CMA_HELPER
        select VIDEOMODE_HELPERS
index e340d0d664292c640a3615bb91d5e5906df8946e..4054d804fe068f5821d49d675549d40a7ea23fb6 100644 (file)
@@ -146,7 +146,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));
-
        ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
        fence_put(bo->moving);
@@ -1044,9 +1043,9 @@ out_unlock:
        return ret;
 }
 
-static bool ttm_bo_mem_compat(struct ttm_placement *placement,
-                             struct ttm_mem_reg *mem,
-                             uint32_t *new_flags)
+bool ttm_bo_mem_compat(struct ttm_placement *placement,
+                      struct ttm_mem_reg *mem,
+                      uint32_t *new_flags)
 {
        int i;
 
@@ -1078,6 +1077,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
 
        return false;
 }
+EXPORT_SYMBOL(ttm_bo_mem_compat);
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
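
ttm_bo_mem_compat() is made non-static and exported so that drivers can ask whether a buffer object's current placement already satisfies a requested placement without calling ttm_bo_validate(), which may physically move the buffer. A sketch of the assumed caller pattern follows ("may_move" is illustrative, not a real field; the vmwgfx hunks further down are the in-tree user):

	/* Sketch: only revalidate when the BO may legally move; otherwise
	 * just test whether its current placement is already acceptable.
	 */
	uint32_t new_flags;
	int ret;

	if (may_move)
		ret = ttm_bo_validate(bo, placement, interruptible, false);
	else
		ret = ttm_bo_mem_compat(placement, &bo->mem, &new_flags) ?
		      0 : -EINVAL;
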
index 4da0e784f9e7dd5851b24d5bfcfe8eac764d8f06..2df602a35f9291ce178a6634ad9d0531ff331df4 100644 (file)
@@ -53,6 +53,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
        int ret;
 
        if (old_mem->mem_type != TTM_PL_SYSTEM) {
+               ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
index d28d4333dcce091fb36c6d6242b2328e8eb9f3e2..526e5a78eea5eba44370f5ee4ddd6a136f18e03d 100644 (file)
@@ -166,16 +166,10 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 
 void ttm_tt_destroy(struct ttm_tt *ttm)
 {
-       int ret;
-
        if (ttm == NULL)
                return;
 
-       if (ttm->state == tt_bound) {
-               ret = ttm->func->unbind(ttm);
-               BUG_ON(ret);
-               ttm->state = tt_unbound;
-       }
+       ttm_tt_unbind(ttm);
 
        if (ttm->state == tt_unbound)
                ttm_tt_unpopulate(ttm);
@@ -255,6 +249,17 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 }
 EXPORT_SYMBOL(ttm_dma_tt_fini);
 
+void ttm_tt_unbind(struct ttm_tt *ttm)
+{
+       int ret;
+
+       if (ttm->state == tt_bound) {
+               ret = ttm->func->unbind(ttm);
+               BUG_ON(ret);
+               ttm->state = tt_unbound;
+       }
+}
+
 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
        int ret = 0;
index 613ab0622d6e4a2dfbfeaf3efee59bfb6c17b7f7..1616ec4f4d84d5a8d7160ba84f8aa8d85fa3e918 100644 (file)
@@ -4,12 +4,7 @@ config DRM_UDL
        depends on USB_SUPPORT
        depends on USB_ARCH_HAS_HCD
        select USB
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_DEFERRED_IO
        select DRM_KMS_HELPER
-        select DRM_KMS_FB_HELPER
        help
          This is a KMS driver for the USB displaylink video adapters.
           Say M/Y to add support for these devices via drm/kms interfaces.
index 25ed00872dbeb608ae0f3e9155f0181e0c3a1736..8b42d31a7f0e8b61ed776200841285d7d5687dbb 100644 (file)
@@ -91,8 +91,7 @@ static void vc4_lastclose(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
 
-       if (vc4->fbdev)
-               drm_fbdev_cma_restore_mode(vc4->fbdev);
+       drm_fbdev_cma_restore_mode(vc4->fbdev);
 }
 
 static const struct file_operations vc4_drm_fops = {
index 9a217fd025f3d3b70d6a19761a96226423bc3ee0..4ac894d993cd80472777b2a3bacc2fcf0dd52810 100644 (file)
@@ -26,8 +26,7 @@ static void vc4_output_poll_changed(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
 
-       if (vc4->fbdev)
-               drm_fbdev_cma_hotplug_event(vc4->fbdev);
+       drm_fbdev_cma_hotplug_event(vc4->fbdev);
 }
 
 struct vc4_commit {
index 3f4c7b8420287188288641e66b9f82addd5b21f3..bfcdea1330e64282fd379de21de87087cc993122 100644 (file)
@@ -1,4 +1,4 @@
 ccflags-y := -Iinclude/drm
-vgem-y := vgem_drv.o
+vgem-y := vgem_drv.o vgem_fence.o
 
 obj-$(CONFIG_DRM_VGEM) += vgem.o
index 29c2aab3c1a79b9252cdecf13328fc9ea09175c8..c15bafb06665f0ec8c788cd1c21d7067dc5a8df3 100644 (file)
@@ -83,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = {
        .close = drm_gem_vm_close,
 };
 
+static int vgem_open(struct drm_device *dev, struct drm_file *file)
+{
+       struct vgem_file *vfile;
+       int ret;
+
+       vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
+       if (!vfile)
+               return -ENOMEM;
+
+       file->driver_priv = vfile;
+
+       ret = vgem_fence_open(vfile);
+       if (ret) {
+               kfree(vfile);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
+{
+       struct vgem_file *vfile = file->driver_priv;
+
+       vgem_fence_close(vfile);
+       kfree(vfile);
+}
+
 /* ioctls */
 
 static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
@@ -164,6 +192,8 @@ unref:
 }
 
 static struct drm_ioctl_desc vgem_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 };
 
 static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -271,9 +301,12 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
 
 static struct drm_driver vgem_driver = {
        .driver_features                = DRIVER_GEM | DRIVER_PRIME,
+       .open                           = vgem_open,
+       .preclose                       = vgem_preclose,
        .gem_free_object_unlocked       = vgem_gem_free_object,
        .gem_vm_ops                     = &vgem_gem_vm_ops,
        .ioctls                         = vgem_ioctls,
+       .num_ioctls                     = ARRAY_SIZE(vgem_ioctls),
        .fops                           = &vgem_driver_fops,
 
        .dumb_create                    = vgem_gem_dumb_create,
@@ -328,5 +361,6 @@ module_init(vgem_init);
 module_exit(vgem_exit);
 
 MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_AUTHOR("Intel Corporation");
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL and additional rights");
index 988cbaae7588ce48fb240b4b086c0b3f8df0d426..1f8798ad329c416a0aa9c867d0ceae826ae6c313 100644 (file)
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
 
+#include <uapi/drm/vgem_drm.h>
+
+struct vgem_file {
+       struct idr fence_idr;
+       struct mutex fence_mutex;
+};
+
 #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
 struct drm_vgem_gem_object {
        struct drm_gem_object base;
 };
 
+int vgem_fence_open(struct vgem_file *file);
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+                           void *data,
+                           struct drm_file *file);
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+                           void *data,
+                           struct drm_file *file);
+void vgem_fence_close(struct vgem_file *file);
+
 #endif
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
new file mode 100644 (file)
index 0000000..5c57c1f
--- /dev/null
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software")
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "vgem_drv.h"
+
+#define VGEM_FENCE_TIMEOUT (10*HZ)
+
+struct vgem_fence {
+       struct fence base;
+       struct spinlock lock;
+       struct timer_list timer;
+};
+
+static const char *vgem_fence_get_driver_name(struct fence *fence)
+{
+       return "vgem";
+}
+
+static const char *vgem_fence_get_timeline_name(struct fence *fence)
+{
+       return "unbound";
+}
+
+static bool vgem_fence_signaled(struct fence *fence)
+{
+       return false;
+}
+
+static bool vgem_fence_enable_signaling(struct fence *fence)
+{
+       return true;
+}
+
+static void vgem_fence_release(struct fence *base)
+{
+       struct vgem_fence *fence = container_of(base, typeof(*fence), base);
+
+       del_timer_sync(&fence->timer);
+       fence_free(&fence->base);
+}
+
+static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+{
+       snprintf(str, size, "%u", fence->seqno);
+}
+
+static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+                                         int size)
+{
+       snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+}
+
+static const struct fence_ops vgem_fence_ops = {
+       .get_driver_name = vgem_fence_get_driver_name,
+       .get_timeline_name = vgem_fence_get_timeline_name,
+       .enable_signaling = vgem_fence_enable_signaling,
+       .signaled = vgem_fence_signaled,
+       .wait = fence_default_wait,
+       .release = vgem_fence_release,
+
+       .fence_value_str = vgem_fence_value_str,
+       .timeline_value_str = vgem_fence_timeline_value_str,
+};
+
+static void vgem_fence_timeout(unsigned long data)
+{
+       struct vgem_fence *fence = (struct vgem_fence *)data;
+
+       fence_signal(&fence->base);
+}
+
+static struct fence *vgem_fence_create(struct vgem_file *vfile,
+                                      unsigned int flags)
+{
+       struct vgem_fence *fence;
+
+       fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+       if (!fence)
+               return NULL;
+
+       spin_lock_init(&fence->lock);
+       fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+                  fence_context_alloc(1), 1);
+
+       setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+
+       /* We force the fence to expire within 10s to prevent driver hangs */
+       mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
+
+       return &fence->base;
+}
+
+static int attach_dmabuf(struct drm_device *dev,
+                        struct drm_gem_object *obj)
+{
+       struct dma_buf *dmabuf;
+
+       if (obj->dma_buf)
+               return 0;
+
+       dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
+       if (IS_ERR(dmabuf))
+               return PTR_ERR(dmabuf);
+
+       obj->dma_buf = dmabuf;
+       drm_gem_object_reference(obj);
+       return 0;
+}
+
+/*
+ * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
+ *
+ * Create and attach a fence to the vGEM handle. This fence is then exposed
+ * via the dma-buf reservation object and visible to consumers of the exported
+ * dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence indicates the
+ * vGEM buffer is being written to by the client and is exposed as an exclusive
+ * fence; otherwise the fence indicates the client is currently reading from the
+ * buffer and all future writes should wait for the client to signal its
+ * completion. Note that if a conflicting fence is already on the dma-buf (i.e.
+ * an exclusive fence when adding a read, or any fence when adding a write),
+ * -EBUSY is reported. Serialisation between operations should be handled
+ * by waiting upon the dma-buf.
+ *
+ * This returns the handle for the new fence, which must be signaled within 10
+ * seconds (otherwise it will automatically expire). See
+ * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL).
+ *
+ * If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
+ */
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+                           void *data,
+                           struct drm_file *file)
+{
+       struct drm_vgem_fence_attach *arg = data;
+       struct vgem_file *vfile = file->driver_priv;
+       struct reservation_object *resv;
+       struct drm_gem_object *obj;
+       struct fence *fence;
+       int ret;
+
+       if (arg->flags & ~VGEM_FENCE_WRITE)
+               return -EINVAL;
+
+       if (arg->pad)
+               return -EINVAL;
+
+       obj = drm_gem_object_lookup(file, arg->handle);
+       if (!obj)
+               return -ENOENT;
+
+       ret = attach_dmabuf(dev, obj);
+       if (ret)
+               goto err;
+
+       fence = vgem_fence_create(vfile, arg->flags);
+       if (!fence) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       /* Check for a conflicting fence */
+       resv = obj->dma_buf->resv;
+       if (!reservation_object_test_signaled_rcu(resv,
+                                                 arg->flags & VGEM_FENCE_WRITE)) {
+               ret = -EBUSY;
+               goto err_fence;
+       }
+
+       /* Expose the fence via the dma-buf */
+       ret = 0;
+       mutex_lock(&resv->lock.base);
+       if (arg->flags & VGEM_FENCE_WRITE)
+               reservation_object_add_excl_fence(resv, fence);
+       else if ((ret = reservation_object_reserve_shared(resv)) == 0)
+               reservation_object_add_shared_fence(resv, fence);
+       mutex_unlock(&resv->lock.base);
+
+       /* Record the fence in our idr for later signaling */
+       if (ret == 0) {
+               mutex_lock(&vfile->fence_mutex);
+               ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
+               mutex_unlock(&vfile->fence_mutex);
+               if (ret > 0) {
+                       arg->out_fence = ret;
+                       ret = 0;
+               }
+       }
+err_fence:
+       if (ret) {
+               fence_signal(fence);
+               fence_put(fence);
+       }
+err:
+       drm_gem_object_unreference_unlocked(obj);
+       return ret;
+}
+
+/*
+ * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL):
+ *
+ * Signal and consume a fence earlier attached to a vGEM handle using
+ * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH).
+ *
+ * All fences must be signaled within 10s of attachment, or they
+ * will automatically expire (and vgem_fence_signal_ioctl returns -ETIMEDOUT).
+ *
+ * Signaling a fence indicates to all consumers of the dma-buf that the
+ * client has completed the operation associated with the fence, and that the
+ * buffer is then ready for consumption.
+ *
+ * If the fence does not exist (or has already been signaled by the client),
+ * vgem_fence_signal_ioctl returns -ENOENT.
+ */
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+                           void *data,
+                           struct drm_file *file)
+{
+       struct vgem_file *vfile = file->driver_priv;
+       struct drm_vgem_fence_signal *arg = data;
+       struct fence *fence;
+       int ret = 0;
+
+       if (arg->flags)
+               return -EINVAL;
+
+       mutex_lock(&vfile->fence_mutex);
+       fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
+       mutex_unlock(&vfile->fence_mutex);
+       if (!fence)
+               return -ENOENT;
+       if (IS_ERR(fence))
+               return PTR_ERR(fence);
+
+       if (fence_is_signaled(fence))
+               ret = -ETIMEDOUT;
+
+       fence_signal(fence);
+       fence_put(fence);
+       return ret;
+}
+
+int vgem_fence_open(struct vgem_file *vfile)
+{
+       mutex_init(&vfile->fence_mutex);
+       idr_init(&vfile->fence_idr);
+
+       return 0;
+}
+
+static int __vgem_fence_idr_fini(int id, void *p, void *data)
+{
+       fence_signal(p);
+       fence_put(p);
+       return 0;
+}
+
+void vgem_fence_close(struct vgem_file *vfile)
+{
+       idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
+       idr_destroy(&vfile->fence_idr);
+}
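
For orientation, here is a minimal userspace sketch driving the two new ioctls end to end. It assumes the uapi header added alongside this series (struct drm_vgem_fence_attach / struct drm_vgem_fence_signal and the VGEM_FENCE_WRITE flag in include/uapi/drm/vgem_drm.h) is visible to userspace, and that vgem_fd is an already-open vGEM device node:

	#include <errno.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/vgem_drm.h>

	/* Attach a write fence to a vGEM buffer, do the CPU work it
	 * guards, then signal before the 10s timeout force-expires it.
	 */
	static int fence_and_signal(int vgem_fd, uint32_t bo_handle)
	{
		struct drm_vgem_fence_attach attach = {
			.handle = bo_handle,
			.flags  = VGEM_FENCE_WRITE,
		};
		struct drm_vgem_fence_signal signal = { 0 };

		if (ioctl(vgem_fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach))
			return -errno;	/* -EBUSY on a conflicting fence */

		/* ... write to the buffer while consumers wait on the dma-buf ... */

		signal.fence = attach.out_fence;
		if (ioctl(vgem_fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal))
			return -errno;	/* -ETIMEDOUT if it already expired */

		return 0;
	}
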
index 9983eadb81b6a18bb9eef9afc80895970fa5ac18..e1afc3d3f8d95cfb33d1905fcbccf0ab0201190d 100644 (file)
@@ -1,11 +1,7 @@
 config DRM_VIRTIO_GPU
        tristate "Virtio GPU driver"
        depends on DRM && VIRTIO
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
         select DRM_KMS_HELPER
-        select DRM_KMS_FB_HELPER
         select DRM_TTM
        help
           This is the virtual GPU driver for virtio.  It can be used with
index ac758cdbc1bc586f15b10ba78b147170ded30a77..4e192aa2d0216fe15d9f7746e3092886b55e031d 100644 (file)
@@ -53,8 +53,7 @@ static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
        struct virtio_gpu_framebuffer *virtio_gpu_fb
                = to_virtio_gpu_framebuffer(fb);
 
-       if (virtio_gpu_fb->obj)
-               drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
+       drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(virtio_gpu_fb);
 }
@@ -326,8 +325,7 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
        ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
        if (ret) {
                kfree(virtio_gpu_fb);
-               if (obj)
-                       drm_gem_object_unreference_unlocked(obj);
+               drm_gem_object_unreference_unlocked(obj);
                return NULL;
        }
 
@@ -348,7 +346,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
        drm_atomic_helper_cleanup_planes(dev, state);
 }
 
-struct drm_mode_config_helper_funcs virtio_mode_config_helpers = {
+static struct drm_mode_config_helper_funcs virtio_mode_config_helpers = {
        .atomic_commit_tail = vgdev_atomic_commit_tail,
 };
 
index 9b078a4939968d8521a96b3f83280f4f06b407bc..0cd889015dc57d5c0d0a8b89f39cd17b10303fbf 100644 (file)
@@ -49,6 +49,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
 {
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
+       uint32_t new_flags;
 
        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
@@ -60,7 +61,12 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                goto err;
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false);
+       if (buf->pin_count > 0)
+               ret = ttm_bo_mem_compat(placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+       else
+               ret = ttm_bo_validate(bo, placement, interruptible, false);
+
        if (!ret)
                vmw_bo_pin_reserved(buf, true);
 
@@ -91,6 +97,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 {
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
+       uint32_t new_flags;
 
        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
@@ -102,6 +109,12 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                goto err;
 
+       if (buf->pin_count > 0) {
+               ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+               goto out_unreserve;
+       }
+
        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
                              false);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
@@ -161,6 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
        struct ttm_placement placement;
        struct ttm_place place;
        int ret = 0;
+       uint32_t new_flags;
 
        place = vmw_vram_placement.placement[0];
        place.lpfn = bo->num_pages;
@@ -185,10 +199,15 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
         */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
-           bo->mem.start > 0)
+           bo->mem.start > 0 &&
+           buf->pin_count == 0)
                (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
 
-       ret = ttm_bo_validate(bo, &placement, interruptible, false);
+       if (buf->pin_count > 0)
+               ret = ttm_bo_mem_compat(&placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+       else
+               ret = ttm_bo_validate(bo, &placement, interruptible, false);
 
        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->offset != 0);
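
All three pin paths above apply the same rule: a buffer with pin_count > 0 must not be moved, so instead of revalidating they only check that the current placement is still compatible. The `== true ? 0 : -EINVAL` spelling is redundant (ttm_bo_mem_compat() already returns bool); a hypothetical helper — not part of this patch — expresses the intent more directly:

	/* Hypothetical helper (not in this patch): succeed iff an already
	 * pinned buffer sits in an acceptable placement.
	 */
	static int vmw_bo_pinned_compat(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
	{
		uint32_t new_flags;

		return ttm_bo_mem_compat(placement, &bo->mem, &new_flags) ?
		       0 : -EINVAL;
	}
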
index 5d5c9515618d628c2480aee061f8a71d8e6f53a3..e8ae3dc476d16fb756fd5ec4380fe644f1f858d0 100644 (file)
@@ -233,6 +233,7 @@ static int vmw_force_iommu;
 static int vmw_restrict_iommu;
 static int vmw_force_coherent;
 static int vmw_restrict_dma_mask;
+static int vmw_assume_16bpp;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
@@ -249,6 +250,8 @@ MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
 module_param_named(force_coherent, vmw_force_coherent, int, 0600);
 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
 module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
+module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
 
 
 static void vmw_print_capabilities(uint32_t capabilities)
@@ -660,6 +663,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 
+       dev_priv->assume_16bpp = !!vmw_assume_16bpp;
+
        dev_priv->enable_fb = enable_fbdev;
 
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -706,6 +711,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                        vmw_read(dev_priv,
                                 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
 
+               /*
+                * Workaround for low-memory 2D VMs to compensate for the
+                * allocation taken by fbdev.
+                */
+               if (!(dev_priv->capabilities & SVGA_CAP_3D))
+                       mem_size *= 2;
+
                dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
                dev_priv->prim_bb_mem =
                        vmw_read(dev_priv,
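
The new assume_16bpp parameter is a plain integer registered with mode 0600, so it can be set at load time (e.g. "modprobe vmwgfx assume_16bpp=1") and, as root, toggled at runtime through /sys/module/vmwgfx/parameters/assume_16bpp. Its consumer is the mode-filtering change in vmwgfx_kms.c further down, where the assumed bytes per pixel drops from 4 to 2 when the flag is set.
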
index 9a90f824814eaf6279088a45460d4cc38b6ec9f4..74304b03f9d4811973c27e0259cd4cbbf9417ae6 100644 (file)
@@ -387,6 +387,7 @@ struct vmw_private {
        spinlock_t hw_lock;
        spinlock_t cap_lock;
        bool has_dx;
+       bool assume_16bpp;
 
        /*
         * VGA registers.
index 1a1a87cbf1097988bc60f91bd6debb872211908d..dc5beff2b4aaff83c18dc77187647102efea5266 100644 (file)
@@ -3625,9 +3625,7 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
                                   (sw_context->cmd_bounce_size >> 1));
        }
 
-       if (sw_context->cmd_bounce != NULL)
-               vfree(sw_context->cmd_bounce);
-
+       vfree(sw_context->cmd_bounce);
        sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
 
        if (sw_context->cmd_bounce == NULL) {
index 679a4cb98ee306bb47c5c0097f1aa829c17a7fc8..d2d93959b1198ce41470a1a76a8341553c1ddc6c 100644 (file)
@@ -517,28 +517,6 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
 
        par->set_fb = &vfb->base;
 
-       if (!par->bo_ptr) {
-               /*
-                * Pin before mapping. Since we don't know in what placement
-                * to pin, call into KMS to do it for us.
-                */
-               ret = vfb->pin(vfb);
-               if (ret) {
-                       DRM_ERROR("Could not pin the fbdev framebuffer.\n");
-                       return ret;
-               }
-
-               ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
-                                 par->vmw_bo->base.num_pages, &par->map);
-               if (ret) {
-                       vfb->unpin(vfb);
-                       DRM_ERROR("Could not map the fbdev framebuffer.\n");
-                       return ret;
-               }
-
-               par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
-       }
-
        return 0;
 }
 
@@ -601,6 +579,31 @@ static int vmw_fb_set_par(struct fb_info *info)
        if (ret)
                goto out_unlock;
 
+       if (!par->bo_ptr) {
+               struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
+
+               /*
+                * Pin before mapping. Since we don't know in what placement
+                * to pin, call into KMS to do it for us.
+                */
+               ret = vfb->pin(vfb);
+               if (ret) {
+                       DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+                       goto out_unlock;
+               }
+
+               ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+                                 par->vmw_bo->base.num_pages, &par->map);
+               if (ret) {
+                       vfb->unpin(vfb);
+                       DRM_ERROR("Could not map the fbdev framebuffer.\n");
+                       goto out_unlock;
+               }
+
+               par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+       }
+
+
        vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
                          par->set_fb->width, par->set_fb->height);
 
index 8a69d4da40b5dcdf5f77b624d600d084b712a194..bf28ccc150dfe081924fc08cf248d898003846f7 100644 (file)
@@ -1555,14 +1555,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
                DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
        };
        int i;
-       u32 assumed_bpp = 2;
+       u32 assumed_bpp = 4;
 
-       /*
-        * If using screen objects, then assume 32-bpp because that's what the
-        * SVGA device is assuming
-        */
-       if (dev_priv->active_display_unit == vmw_du_screen_object)
-               assumed_bpp = 4;
+       if (dev_priv->assume_16bpp)
+               assumed_bpp = 2;
 
        if (dev_priv->active_display_unit == vmw_du_screen_target) {
                max_width  = min(max_width,  dev_priv->stdu_max_width);
index f0374f9b56cad3a522119f7d4cf8a8940f445bf0..e57a0bad7a626daf505625ce019f2583218a51ab 100644 (file)
@@ -300,6 +300,9 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
                break;
        }
 
+       if (retries == RETRIES)
+               return -EINVAL;
+
        *msg_len = reply_len;
        *msg     = reply;
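
This fixes a classic retry-loop fallthrough: when every attempt fails, control leaves the loop with retries == RETRIES, and the function previously carried on as though a reply had been received. A self-contained sketch of the corrected flow (identifiers are illustrative, not from the driver):

	#include <stdbool.h>

	#define RETRIES 3

	static int recv_with_retries(bool (*try_once)(void))
	{
		int retries;

		for (retries = 0; retries < RETRIES; retries++)
			if (try_once())		/* got a reply, stop retrying */
				break;

		if (retries == RETRIES)
			return -1;		/* every attempt failed */

		return 0;			/* reply received */
	}
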
 
index 9ca818fb034c923c4737ccfedb3ea98002aaae84..41932a7c4f79516da86f9e35d1223c8e033cf248 100644 (file)
@@ -399,8 +399,10 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
 
        WARN_ON_ONCE(!stdu->defined);
 
-       if (!vfb->dmabuf && new_fb->width == mode->hdisplay &&
-           new_fb->height == mode->vdisplay)
+       new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+
+       if (new_vfbs && new_vfbs->surface->base_size.width == mode->hdisplay &&
+           new_vfbs->surface->base_size.height == mode->vdisplay)
                new_content_type = SAME_AS_DISPLAY;
        else if (vfb->dmabuf)
                new_content_type = SEPARATE_DMA;
@@ -444,7 +446,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
                        content_srf.mip_levels[0]     = 1;
                        content_srf.multisample_count = 0;
                } else {
-                       new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
                        content_srf = *new_vfbs->surface;
                }
 
@@ -464,7 +465,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
                        return ret;
                }
        } else if (new_content_type == SAME_AS_DISPLAY) {
-               new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
                new_display_srf = vmw_surface_reference(new_vfbs->surface);
        }
 
index 95b7d61d9910f3c65fced473c4e2fab5ce0d4e9b..fb6f1f4472795700c1f12889c405b9350b4ebcb0 100644 (file)
@@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
 #define MT_QUIRK_ALWAYS_VALID          (1 << 4)
 #define MT_QUIRK_VALID_IS_INRANGE      (1 << 5)
 #define MT_QUIRK_VALID_IS_CONFIDENCE   (1 << 6)
+#define MT_QUIRK_CONFIDENCE            (1 << 7)
 #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE   (1 << 8)
 #define MT_QUIRK_NO_AREA               (1 << 9)
 #define MT_QUIRK_IGNORE_DUPLICATES     (1 << 10)
@@ -78,6 +79,7 @@ struct mt_slot {
        __s32 contactid;        /* the device ContactID assigned to this slot */
        bool touch_state;       /* is the touch valid? */
        bool inrange_state;     /* is the finger in proximity of the sensor? */
+       bool confidence_state;  /* is the touch made by a finger? */
 };
 
 struct mt_class {
@@ -503,10 +505,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        return 1;
                case HID_DG_CONFIDENCE:
                        if (cls->name == MT_CLS_WIN_8 &&
-                               field->application == HID_DG_TOUCHPAD) {
-                               cls->quirks &= ~MT_QUIRK_ALWAYS_VALID;
-                               cls->quirks |= MT_QUIRK_VALID_IS_CONFIDENCE;
-                       }
+                               field->application == HID_DG_TOUCHPAD)
+                               cls->quirks |= MT_QUIRK_CONFIDENCE;
                        mt_store_field(usage, td, hi);
                        return 1;
                case HID_DG_TIPSWITCH:
@@ -619,6 +619,7 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
                return;
 
        if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
+               int active;
                int slotnum = mt_compute_slot(td, input);
                struct mt_slot *s = &td->curdata;
                struct input_mt *mt = input->mt;
@@ -633,10 +634,14 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
                                return;
                }
 
+               if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
+                       s->confidence_state = 1;
+               active = (s->touch_state || s->inrange_state) &&
+                                                       s->confidence_state;
+
                input_mt_slot(input, slotnum);
-               input_mt_report_slot_state(input, MT_TOOL_FINGER,
-                       s->touch_state || s->inrange_state);
-               if (s->touch_state || s->inrange_state) {
+               input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
+               if (active) {
                        /* this finger is in proximity of the sensor */
                        int wide = (s->w > s->h);
                        /* divided by two to match visual scale of touch */
@@ -701,6 +706,8 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
                        td->curdata.touch_state = value;
                        break;
                case HID_DG_CONFIDENCE:
+                       if (quirks & MT_QUIRK_CONFIDENCE)
+                               td->curdata.confidence_state = value;
                        if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
                                td->curvalid = value;
                        break;
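
Taken together, the hunks above replace the old behaviour, where Win8 touchpads remapped report validity onto the confidence bit, with a per-slot confidence state: an unconfident contact — typically a palm — now releases just its own slot, and devices without a confidence usage are unaffected because the state is forced on. Condensed from the changes above (not verbatim driver code):

	/* A slot reports an active finger only when the contact is both
	 * present (touching or in range) and marked confident; without
	 * the quirk, confidence is assumed.
	 */
	if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
		s->confidence_state = 1;

	active = (s->touch_state || s->inrange_state) && s->confidence_state;
	input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
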
index cc6439ab3f714145f4d86c49c3d3c79716a058b1..041050edd80991713d9bb5a776c9a1d402f23bf5 100644 (file)
@@ -1268,6 +1268,8 @@ static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
                }
        }
 
+       idx = 0;
+
        do {
                if (msgs[idx].len == 0) {
                        ret = -EINVAL;
index 445398c314a3303a41ba51313bbcbfd3a40c421c..b126dbaa47e37014acaf78ba528a6e177bffa9d8 100644 (file)
@@ -912,7 +912,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
        ret = tegra_i2c_init(i2c_dev);
        if (ret) {
                dev_err(&pdev->dev, "Failed to initialize i2c controller");
-               goto unprepare_div_clk;
+               goto disable_div_clk;
        }
 
        ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
index e33022e2d459f18b57027337c9c9de9243488c07..6e5fac6a5262a0824d69ebe534b18fd855d35922 100644 (file)
@@ -56,9 +56,7 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num);
  * The board info passed can safely be __initdata, but be careful of embedded
  * pointers (for platform_data, functions, etc) since that won't be copied.
  */
-int __init
-i2c_register_board_info(int busnum,
-       struct i2c_board_info const *info, unsigned len)
+int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
 {
        int status;
 
index 26e7c5187a589b473857fd991acdd60a1bdb4609..c6a90b4a9c626dcf4fdf65e51e2f01b5b6ac4968 100644 (file)
@@ -145,7 +145,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
                mux->data.idle_in_use = true;
 
        /* map address from "reg" if exists */
-       if (of_address_to_resource(np, 0, &res)) {
+       if (of_address_to_resource(np, 0, &res) == 0) {
                mux->data.reg_size = resource_size(&res);
                mux->data.reg = devm_ioremap_resource(&pdev->dev, &res);
                if (IS_ERR(mux->data.reg))
index 923f56598d4b51a84ee7b5b322c2c9bfefe4f0a5..3a9f106787d28b2f85248402b68628fd02604460 100644 (file)
@@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
 
        mutex_lock(&st->buf_lock);
        ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
-       if (ret)
+       if (ret < 0)
                goto error_ret;
        st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
        st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
@@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
                break;
        case IIO_CHAN_INFO_SCALE:
                ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
-               if (ret)
+               if (ret < 0)
                        goto error_ret;
                *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
                ret = IIO_VAL_INT_PLUS_MICRO;
index 21e19b60e2b98305b209a648690e8f18f64b40f9..2123f0ac2e2a47b608eedc6095f1681802edfcd9 100644 (file)
@@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi)
 
        st = iio_priv(indio_dev);
 
-       st->reg = devm_regulator_get(&spi->dev, "vref");
-       if (!IS_ERR_OR_NULL(st->reg)) {
+       st->reg = devm_regulator_get_optional(&spi->dev, "vref");
+       if (!IS_ERR(st->reg)) {
                ret = regulator_enable(st->reg);
                if (ret)
                        return ret;
@@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi)
 
                st->vref_mv = ret / 1000;
        } else {
+               /* Any other error indicates the regulator exists; propagate it */
+               if (PTR_ERR(st->reg) != -ENODEV)
+                       return PTR_ERR(st->reg);
                /* Use internal reference */
                st->vref_mv = 2500;
        }
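
The move to devm_regulator_get_optional() makes the three outcomes explicit: a usable "vref" supply, -ENODEV meaning the supply simply is not wired up (fall back to the 2.5 V internal reference), or any other error — e.g. -EPROBE_DEFER — which must now be propagated instead of being masked, as the old IS_ERR_OR_NULL() test did. The idiom, condensed into a sketch (not the driver's exact code):

	/* Condensed sketch of the optional-supply idiom used above. */
	static int get_vref_mv(struct device *dev, int *vref_mv)
	{
		struct regulator *reg = devm_regulator_get_optional(dev, "vref");

		if (IS_ERR(reg)) {
			if (PTR_ERR(reg) != -ENODEV)
				return PTR_ERR(reg);	/* e.g. -EPROBE_DEFER */
			*vref_mv = 2500;	/* absent: internal reference */
			return 0;
		}

		return regulator_enable(reg);	/* caller reads voltage after */
	}
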
index f62b8bd9ad7ef0fb440e8d28a55061db4ffb3496..dd6fc6d21f9d0c6057b6b5029800eef2fa82dacb 100644 (file)
@@ -56,6 +56,7 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
        int i;
        acpi_status status;
        union acpi_object *cpm;
+       int ret;
 
        status = acpi_evaluate_object(adev->handle, "CNF0", NULL, &buffer);
        if (ACPI_FAILURE(status))
@@ -82,10 +83,10 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
                        }
                }
        }
-
+       ret = cpm->package.count;
        kfree(buffer.pointer);
 
-       return cpm->package.count;
+       return ret;
 }
 
 static int acpi_i2c_check_resource(struct acpi_resource *ares, void *data)
index a5793c8f15901483e24e24393173ea8dbee6b191..60df4f8e81bed10ac8e9b6149ec4c3ef5c6abf9f 100644 (file)
@@ -530,6 +530,7 @@ static PORT_PMA_ATTR(port_xmit_data             , 12, 32, 192);
 static PORT_PMA_ATTR(port_rcv_data                 , 13, 32, 224);
 static PORT_PMA_ATTR(port_xmit_packets             , 14, 32, 256);
 static PORT_PMA_ATTR(port_rcv_packets              , 15, 32, 288);
+static PORT_PMA_ATTR(port_xmit_wait                ,  0, 32, 320);
 
 /*
  * Counters added by extended set
@@ -560,6 +561,7 @@ static struct attribute *pma_attrs[] = {
        &port_pma_attr_port_rcv_data.attr.attr,
        &port_pma_attr_port_xmit_packets.attr.attr,
        &port_pma_attr_port_rcv_packets.attr.attr,
+       &port_pma_attr_port_xmit_wait.attr.attr,
        NULL
 };
 
@@ -579,6 +581,7 @@ static struct attribute *pma_attrs_ext[] = {
        &port_pma_attr_ext_port_xmit_data.attr.attr,
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
+       &port_pma_attr_port_xmit_wait.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_xmit_packets.attr.attr,
@@ -604,6 +607,7 @@ static struct attribute *pma_attrs_noietf[] = {
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
+       &port_pma_attr_port_xmit_wait.attr.attr,
        NULL
 };
 
index f5de851780555a5c9f06d91c2eb9bb2ff14c666e..dad4d0ebbdffb45e2c667cc95b86aabf39b53a87 100644 (file)
@@ -14113,8 +14113,14 @@ static int init_asic_data(struct hfi1_devdata *dd)
 {
        unsigned long flags;
        struct hfi1_devdata *tmp, *peer = NULL;
+       struct hfi1_asic_data *asic_data;
        int ret = 0;
 
+       /* pre-allocate the asic structure in case we are the first device */
+       asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
+       if (!asic_data)
+               return -ENOMEM;
+
        spin_lock_irqsave(&hfi1_devs_lock, flags);
        /* Find our peer device */
        list_for_each_entry(tmp, &hfi1_dev_list, list) {
@@ -14126,18 +14132,14 @@ static int init_asic_data(struct hfi1_devdata *dd)
        }
 
        if (peer) {
+               /* use already allocated structure */
                dd->asic_data = peer->asic_data;
+               kfree(asic_data);
        } else {
-               dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
-               if (!dd->asic_data) {
-                       ret = -ENOMEM;
-                       goto done;
-               }
+               dd->asic_data = asic_data;
                mutex_init(&dd->asic_data->asic_resource_mutex);
        }
        dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
-
-done:
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
        return ret;
 }
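[Editor's note] The init_asic_data() change moves the kzalloc() out from under hfi1_devs_lock: a GFP_KERNEL allocation may sleep, which is forbidden while holding a spinlock, so the structure is allocated speculatively up front and simply freed if a peer device already supplies one. A hedged pthreads sketch of the same "allocate first, discard if unused" shape (all names invented for illustration):

    #include <pthread.h>
    #include <stdlib.h>

    struct shared { int refs; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct shared *current_shared;

    /* Attach to the existing object, or install a fresh one. */
    int attach_shared(void)
    {
            /* Fallible work happens before the lock is taken... */
            struct shared *fresh = calloc(1, sizeof(*fresh));

            if (!fresh)
                    return -1;

            pthread_mutex_lock(&lock);
            if (current_shared)
                    free(fresh);            /* a peer got there first */
            else
                    current_shared = fresh;
            current_shared->refs++;
            pthread_mutex_unlock(&lock);
            return 0;
    }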
index 1e503ad0bebb764ee1e05462604538e6324002f8..be91f6fa1c87b16fe4f31affd6c12848b1951792 100644 (file)
@@ -678,8 +678,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
        u32 tlen = packet->tlen;
        struct rvt_qp *qp = packet->qp;
        bool has_grh = rcv_flags & HFI1_HAS_GRH;
-       bool sc4_bit = has_sc4_bit(packet);
-       u8 sc;
+       u8 sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
        u32 bth1;
        int is_mcast;
        struct ib_grh *grh = NULL;
@@ -697,10 +696,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
                 */
                struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
                u32 lqpn =  be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
-               u8 sl, sc5;
+               u8 sl;
 
-               sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-               sc5 |= sc4_bit;
                sl = ibp->sc_to_sl[sc5];
 
                process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
@@ -717,10 +714,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
 
        if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
                u16 slid = be16_to_cpu(hdr->lrh[3]);
-               u8 sc5;
-
-               sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-               sc5 |= sc4_bit;
 
                return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
        }
@@ -745,10 +738,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
                if (qp->ibqp.qp_num > 1) {
                        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
                        u16 slid;
-                       u8 sc5;
-
-                       sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-                       sc5 |= sc4_bit;
 
                        slid = be16_to_cpu(hdr->lrh[3]);
                        if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
@@ -790,10 +779,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
                /* Received on QP0, and so by definition, this is an SMP */
                struct opa_smp *smp = (struct opa_smp *)data;
                u16 slid = be16_to_cpu(hdr->lrh[3]);
-               u8 sc5;
-
-               sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-               sc5 |= sc4_bit;
 
                if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
                        goto drop;
@@ -890,9 +875,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
        }
 
        wc.slid = be16_to_cpu(hdr->lrh[3]);
-       sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-       sc |= sc4_bit;
-       wc.sl = ibp->sc_to_sl[sc];
+       wc.sl = ibp->sc_to_sl[sc5];
 
        /*
         * Save the LMC lower bits if the destination LID is a unicast LID.
index c963cad92f5a8eb061af74ae964e9222115e1815..6e9081380a276cbb78da8687820b9c7092f684af 100644 (file)
@@ -600,8 +600,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
        cqp_init_info.scratch_array = cqp->scratch_array;
        status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
        if (status) {
-               i40iw_pr_err("cqp init status %d maj_err %d min_err %d\n",
-                            status, maj_err, min_err);
+               i40iw_pr_err("cqp init status %d\n", status);
                goto exit;
        }
        status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
index 33959ed14563e0f43e5209320ba0fd5ff0338abb..283b64c942eebfea6378ee8f4ad5206e6f549876 100644 (file)
@@ -1474,6 +1474,7 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr
        info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
        info->pd_id = iwpd->sc_pd.pd_id;
        info->total_len = iwmr->length;
+       info->remote_access = true;
        cqp_info->cqp_cmd = OP_ALLOC_STAG;
        cqp_info->post_sq = 1;
        cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
index 804dbcc37d3f70bc929d30bc63b13b9464f9aee0..a529a4535457217e761fd1e8f8b0748cb48a5498 100644 (file)
@@ -1031,17 +1031,17 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
 
        case XTYPE_XBOXONE:
                packet->data[0] = 0x09; /* activate rumble */
-               packet->data[1] = 0x08;
+               packet->data[1] = 0x00;
                packet->data[2] = xpad->odata_serial++;
-               packet->data[3] = 0x08; /* continuous effect */
-               packet->data[4] = 0x00; /* simple rumble mode */
-               packet->data[5] = 0x03; /* L and R actuator only */
-               packet->data[6] = 0x00; /* TODO: LT actuator */
-               packet->data[7] = 0x00; /* TODO: RT actuator */
+               packet->data[3] = 0x09;
+               packet->data[4] = 0x00;
+               packet->data[5] = 0x0F;
+               packet->data[6] = 0x00;
+               packet->data[7] = 0x00;
                packet->data[8] = strong / 512; /* left actuator */
                packet->data[9] = weak / 512;   /* right actuator */
-               packet->data[10] = 0x80;        /* length of pulse */
-               packet->data[11] = 0x00;        /* stop period of pulse */
+               packet->data[10] = 0xFF;
+               packet->data[11] = 0x00;
                packet->data[12] = 0x00;
                packet->len = 13;
                packet->pending = true;
@@ -1431,22 +1431,15 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
        int ep_irq_in_idx;
        int i, error;
 
+       if (intf->cur_altsetting->desc.bNumEndpoints != 2)
+               return -ENODEV;
+
        for (i = 0; xpad_device[i].idVendor; i++) {
                if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
                    (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
                        break;
        }
 
-       if (xpad_device[i].xtype == XTYPE_XBOXONE &&
-           intf->cur_altsetting->desc.bInterfaceNumber != 0) {
-               /*
-                * The Xbox One controller lists three interfaces all with the
-                * same interface class, subclass and protocol. Differentiate by
-                * interface number.
-                */
-               return -ENODEV;
-       }
-
        xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
        if (!xpad)
                return -ENOMEM;
@@ -1478,6 +1471,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
                if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
                        if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
                                xpad->xtype = XTYPE_XBOX360W;
+                       else if (intf->cur_altsetting->desc.bInterfaceProtocol == 208)
+                               xpad->xtype = XTYPE_XBOXONE;
                        else
                                xpad->xtype = XTYPE_XBOX360;
                } else {
@@ -1492,6 +1487,17 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
                        xpad->mapping |= MAP_STICKS_TO_NULL;
        }
 
+       if (xpad->xtype == XTYPE_XBOXONE &&
+           intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+               /*
+                * The Xbox One controller lists three interfaces all with the
+                * same interface class, subclass and protocol. Differentiate by
+                * interface number.
+                */
+               error = -ENODEV;
+               goto err_free_in_urb;
+       }
+
        error = xpad_init_output(intf, xpad);
        if (error)
                goto err_free_in_urb;
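[Editor's note] The xpad_probe() rework moves the Xbox One interface-number check to after xpad and the input URB have been allocated, so the reject path must now unwind via goto err_free_in_urb rather than a bare return -ENODEV. The general goto-cleanup idiom, sketched in plain C (error codes and names illustrative):

    #include <stdlib.h>

    int probe(int want)
    {
            int err;
            char *a, *b;

            a = malloc(16);
            if (!a)
                    return -1;

            b = malloc(16);
            if (!b) {
                    err = -1;
                    goto err_free_a;
            }

            if (!want) {            /* late validation failure */
                    err = -2;
                    goto err_free_b;  /* unwind everything taken so far */
            }
            return 0;

    err_free_b:
            free(b);
    err_free_a:
            free(a);
            return err;
    }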
index 78f93cf68840d24328ebd0630f86c4e372d051bf..be5b399da5d3ec7b982fa49945bcc4d3be576b39 100644 (file)
@@ -1568,13 +1568,7 @@ static int elantech_set_properties(struct elantech_data *etd)
                case 5:
                        etd->hw_version = 3;
                        break;
-               case 6:
-               case 7:
-               case 8:
-               case 9:
-               case 10:
-               case 13:
-               case 14:
+               case 6 ... 14:
                        etd->hw_version = 4;
                        break;
                default:
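[Editor's note] "case 6 ... 14:" uses the GCC/Clang case-range extension (not ISO C) to collapse the previous run of individual labels; note the range also picks up types 11 and 12, which the old list skipped and which previously fell through to default. A small runnable sketch:

    #include <stdio.h>

    static int hw_version(int ic_type)
    {
            switch (ic_type) {
            case 5:
                    return 3;
            case 6 ... 14:  /* GNU case-range: matches 6..14 inclusive */
                    return 4;
            default:
                    return -1;
            }
    }

    int main(void)
    {
            printf("%d %d %d\n", hw_version(5), hw_version(11), hw_version(42));
            return 0;
    }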
index a3f0f5a47490e936e31b45594861d692503429b1..0f586780ceb4bee18a38428b889dba8d994ea4c7 100644 (file)
@@ -355,18 +355,11 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
                return -ENXIO;
        }
 
-       if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
-               psmouse_dbg(psmouse, "VMMouse port in use.\n");
-               return -EBUSY;
-       }
-
        /* Check if the device is present */
        response = ~VMMOUSE_PROTO_MAGIC;
        VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
-       if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) {
-               release_region(VMMOUSE_PROTO_PORT, 4);
+       if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU)
                return -ENXIO;
-       }
 
        if (set_properties) {
                psmouse->vendor = VMMOUSE_VENDOR;
@@ -374,8 +367,6 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
                psmouse->model = version;
        }
 
-       release_region(VMMOUSE_PROTO_PORT, 4);
-
        return 0;
 }
 
@@ -394,7 +385,6 @@ static void vmmouse_disconnect(struct psmouse *psmouse)
        psmouse_reset(psmouse);
        input_unregister_device(priv->abs_dev);
        kfree(priv);
-       release_region(VMMOUSE_PROTO_PORT, 4);
 }
 
 /**
@@ -438,15 +428,10 @@ int vmmouse_init(struct psmouse *psmouse)
        struct input_dev *rel_dev = psmouse->dev, *abs_dev;
        int error;
 
-       if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
-               psmouse_dbg(psmouse, "VMMouse port in use.\n");
-               return -EBUSY;
-       }
-
        psmouse_reset(psmouse);
        error = vmmouse_enable(psmouse);
        if (error)
-               goto release_region;
+               return error;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        abs_dev = input_allocate_device();
@@ -502,8 +487,5 @@ init_fail:
        kfree(priv);
        psmouse->private = NULL;
 
-release_region:
-       release_region(VMMOUSE_PROTO_PORT, 4);
-
        return error;
 }
index b368b0515c5a3aa26161dd7893a1c22c2678e4b2..253df96be4276cc9196a6c0ab25b04f838d061ac 100644 (file)
@@ -157,11 +157,11 @@ static int rmi_function_match(struct device *dev, struct device_driver *drv)
 static void rmi_function_of_probe(struct rmi_function *fn)
 {
        char of_name[9];
+       struct device_node *node = fn->rmi_dev->xport->dev->of_node;
 
        snprintf(of_name, sizeof(of_name), "rmi4-f%02x",
                fn->fd.function_number);
-       fn->dev.of_node = of_find_node_by_name(
-                               fn->rmi_dev->xport->dev->of_node, of_name);
+       fn->dev.of_node = of_get_child_by_name(node, of_name);
 }
 #else
 static inline void rmi_function_of_probe(struct rmi_function *fn)
index 8dd3fb5e1f9433f013f18c51728205c425a61560..88e91559c84e2dcad267a0e20fd4e65bc56238bb 100644 (file)
@@ -66,7 +66,7 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
        struct rmi_device *rmi_dev = fn->rmi_dev;
        int ret;
        int offset;
-       u8 buf[14];
+       u8 buf[15];
        int pitch_x = 0;
        int pitch_y = 0;
        int clip_x_low = 0;
@@ -86,9 +86,10 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
 
        offset = rmi_register_desc_calc_reg_offset(&f12->control_reg_desc, 8);
 
-       if (item->reg_size > 14) {
-               dev_err(&fn->dev, "F12 control8 should be 14 bytes, not: %ld\n",
-                       item->reg_size);
+       if (item->reg_size > sizeof(buf)) {
+               dev_err(&fn->dev,
+                       "F12 control8 should be no bigger than %zd bytes, not: %ld\n",
+                       sizeof(buf), item->reg_size);
                return -ENODEV;
        }
 
index 3c3dd78303be0e68f1373cb01d6609be6a1efbc7..fed73eeb47b3c6e263ca2e0415d9b385cc2f4854 100644 (file)
@@ -118,6 +118,13 @@ static int ts4800_parse_dt(struct platform_device *pdev,
                return -ENODEV;
        }
 
+       ts->regmap = syscon_node_to_regmap(syscon_np);
+       of_node_put(syscon_np);
+       if (IS_ERR(ts->regmap)) {
+               dev_err(dev, "cannot get parent's regmap\n");
+               return PTR_ERR(ts->regmap);
+       }
+
        error = of_property_read_u32_index(np, "syscon", 1, &reg);
        if (error < 0) {
                dev_err(dev, "no offset in syscon\n");
@@ -134,12 +141,6 @@ static int ts4800_parse_dt(struct platform_device *pdev,
 
        ts->bit = BIT(bit);
 
-       ts->regmap = syscon_node_to_regmap(syscon_np);
-       if (IS_ERR(ts->regmap)) {
-               dev_err(dev, "cannot get parent's regmap\n");
-               return PTR_ERR(ts->regmap);
-       }
-
        return 0;
 }
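[Editor's note] The ts4800 fix pulls the regmap lookup up next to where syscon_np is obtained and adds of_node_put(syscon_np) immediately afterwards, so the node reference taken earlier cannot leak on the later error returns (the regmap itself is managed elsewhere and outlives the reference). A generic refcount sketch of "take what you need, then drop the reference right away" (types invented for illustration):

    #include <stdlib.h>

    struct node {
            int refs;
            void *payload;  /* assumed to be owned elsewhere */
    };

    static void node_put(struct node *n)
    {
            if (--n->refs == 0)
                    free(n);
    }

    /* Borrow the payload, then release our reference immediately so
     * no later error path has to remember to do it. */
    void *grab_payload(struct node *n)
    {
            void *p = n->payload;

            node_put(n);
            return p;
    }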
 
index 7295c198aa086be8630b3a77e202415649c6937b..6fe55d598facac3a05114e57e03b92b9d162eb87 100644 (file)
 #include <linux/regmap.h>
 #include "tsc200x-core.h"
 
+static const struct input_id tsc2004_input_id = {
+       .bustype = BUS_I2C,
+       .product = 2004,
+};
+
 static int tsc2004_cmd(struct device *dev, u8 cmd)
 {
        u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -42,7 +47,7 @@ static int tsc2004_probe(struct i2c_client *i2c,
                         const struct i2c_device_id *id)
 
 {
-       return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C,
+       return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id,
                             devm_regmap_init_i2c(i2c, &tsc200x_regmap_config),
                             tsc2004_cmd);
 }
index b9f593dfd2ef8368223e5d7d7d9ca06856afcc8d..f2c5f0e47f77dd6ab177adec2bb102ed0da5dd08 100644 (file)
 #include <linux/regmap.h>
 #include "tsc200x-core.h"
 
+static const struct input_id tsc2005_input_id = {
+       .bustype = BUS_SPI,
+       .product = 2005,
+};
+
 static int tsc2005_cmd(struct device *dev, u8 cmd)
 {
        u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -62,7 +67,7 @@ static int tsc2005_probe(struct spi_device *spi)
        if (error)
                return error;
 
-       return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI,
+       return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id,
                             devm_regmap_init_spi(spi, &tsc200x_regmap_config),
                             tsc2005_cmd);
 }
index 15240c1ee850abd4f5099db9a9e21a4b8ecf725b..dfa7f1c4f5453bd9d1544badc24548eab0851361 100644 (file)
@@ -450,7 +450,7 @@ static void tsc200x_close(struct input_dev *input)
        mutex_unlock(&ts->mutex);
 }
 
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
                  struct regmap *regmap,
                  int (*tsc200x_cmd)(struct device *dev, u8 cmd))
 {
@@ -547,9 +547,18 @@ int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
        snprintf(ts->phys, sizeof(ts->phys),
                 "%s/input-ts", dev_name(dev));
 
-       input_dev->name = "TSC200X touchscreen";
+       if (tsc_id->product == 2004) {
+               input_dev->name = "TSC200X touchscreen";
+       } else {
+               input_dev->name = devm_kasprintf(dev, GFP_KERNEL,
+                                                "TSC%04d touchscreen",
+                                                tsc_id->product);
+               if (!input_dev->name)
+                       return -ENOMEM;
+       }
+
        input_dev->phys = ts->phys;
-       input_dev->id.bustype = bustype;
+       input_dev->id = *tsc_id;
        input_dev->dev.parent = dev;
        input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
        input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
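[Editor's note] tsc200x_probe() now derives the input device name from the product number at runtime ("TSC2005 touchscreen" and so on), keeping the legacy "TSC200X touchscreen" string for the 2004, presumably so existing userspace matching on that name keeps working. A portable sketch of the formatted-name pattern, with POSIX strdup standing in for the kernel's devm_kasprintf():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    char *make_name(int product)
    {
            char buf[32];
            int n;

            if (product == 2004)
                    n = snprintf(buf, sizeof(buf), "TSC200X touchscreen");
            else
                    n = snprintf(buf, sizeof(buf), "TSC%04d touchscreen",
                                 product);
            if (n < 0 || (size_t)n >= sizeof(buf))
                    return NULL;
            return strdup(buf);     /* caller frees; devm_ would auto-free */
    }

    int main(void)
    {
            char *name = make_name(2005);

            if (!name)
                    return 1;
            puts(name);             /* "TSC2005 touchscreen" */
            free(name);
            return 0;
    }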
index 7a482d1026148c692988172773af1c3f2ad9e57e..49a63a3c684094a331a7465dda370f1a2f0fcb21 100644 (file)
@@ -70,7 +70,7 @@
 extern const struct regmap_config tsc200x_regmap_config;
 extern const struct dev_pm_ops tsc200x_pm_ops;
 
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
                  struct regmap *regmap,
                  int (*tsc200x_cmd)(struct device *dev, u8 cmd));
 int tsc200x_remove(struct device *dev);
index bab3c6acf6a228f13fcb44756b43374293bf030f..b6fc4bde79ded75430725f97f72d7a9c3bae2c6b 100644 (file)
@@ -27,7 +27,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@gmail.com>");
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 
-#define W8001_MAX_LENGTH       11
+#define W8001_MAX_LENGTH       13
 #define W8001_LEAD_MASK                0x80
 #define W8001_LEAD_BYTE                0x80
 #define W8001_TAB_MASK         0x40
@@ -155,6 +155,7 @@ static void parse_multi_touch(struct w8001 *w8001)
                bool touch = data[0] & (1 << i);
 
                input_mt_slot(dev, i);
+               input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
                if (touch) {
                        x = (data[6 * i + 1] << 7) | data[6 * i + 2];
                        y = (data[6 * i + 3] << 7) | data[6 * i + 4];
@@ -339,6 +340,15 @@ static irqreturn_t w8001_interrupt(struct serio *serio,
                w8001->idx = 0;
                parse_multi_touch(w8001);
                break;
+
+       default:
+               /*
+                * ThinkPad X60 Tablet PC (pen only device) sometimes
+                * sends invalid data packets that are larger than
+                * W8001_PKTLEN_TPCPEN. Let's start over again.
+                */
+               if (!w8001->touch_dev && w8001->idx > W8001_PKTLEN_TPCPEN - 1)
+                       w8001->idx = 0;
        }
 
        return IRQ_HANDLED;
@@ -513,6 +523,8 @@ static int w8001_setup_touch(struct w8001 *w8001, char *basename,
                                        0, touch.x, 0, 0);
                input_set_abs_params(dev, ABS_MT_POSITION_Y,
                                        0, touch.y, 0, 0);
+               input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
+                                       0, MT_TOOL_MAX, 0, 0);
 
                strlcat(basename, " 2FG", basename_sz);
                if (w8001->max_pen_x && w8001->max_pen_y)
index 9e0034196e10468ea6e624b64737a1487dade7fa..59741ead7e158ca4e7b0d6506a0529c03c3fe412 100644 (file)
@@ -1107,13 +1107,13 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                break;
                        }
 
+                       devid = e->devid;
                        DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
                                    hid, uid,
                                    PCI_BUS_NUM(devid),
                                    PCI_SLOT(devid),
                                    PCI_FUNC(devid));
 
-                       devid  = e->devid;
                        flags = e->flags;
 
                        ret = add_acpi_hid_device(hid, uid, &devid, false);
@@ -1568,13 +1568,23 @@ static int __init amd_iommu_init_pci(void)
                        break;
        }
 
+       /*
+        * Order is important here to make sure any unity map requirements are
+        * fulfilled. The unity mappings are created and written to the device
+        * table during the amd_iommu_init_api() call.
+        *
+        * After that we call init_device_table_dma() to make sure any
+        * uninitialized DTE will block DMA, and in the end we flush the caches
+        * of all IOMMUs to make sure the changes to the device table are
+        * active.
+        */
+       ret = amd_iommu_init_api();
+
        init_device_table_dma();
 
        for_each_iommu(iommu)
                iommu_flush_all_caches(iommu);
 
-       ret = amd_iommu_init_api();
-
        if (!ret)
                print_iommu_info();
 
index 10700945994eed2e470c92d267c0bb5723ed0d1a..323dac9900ba3cc0a0c1351dfc39de86b4cb06ab 100644 (file)
@@ -4602,13 +4602,13 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
        for (i = 0; i < g_num_of_iommus; i++) {
                struct intel_iommu *iommu = g_iommus[i];
                struct dmar_domain *domain;
-               u16 did;
+               int did;
 
                if (!iommu)
                        continue;
 
-               for (did = 0; did < 0xffff; did++) {
-                       domain = get_iommu_domain(iommu, did);
+               for (did = 0; did < cap_ndoms(iommu->cap); did++) {
+                       domain = get_iommu_domain(iommu, (u16)did);
 
                        if (!domain)
                                continue;
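[Editor's note] Two things change here: the loop now stops at the hardware's real domain-ID count from cap_ndoms() instead of blindly probing 0xffff IDs, and the counter is widened to int, because a u16 counter compared against a bound that can reach 0x10000 (the maximum domain count) is always smaller than it, so the loop would never terminate. The counter stays wide and is narrowed only at the use site. A runnable illustration (bounds illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define NDOMS 0x10000UL         /* one past the largest u16 value */

    int main(void)
    {
            unsigned long visited = 0;

            /*
             * BAD: for (uint16_t did = 0; did < NDOMS; did++)
             * never terminates -- did wraps from 0xffff back to 0.
             */
            for (int did = 0; did < (int)NDOMS; did++) {
                    uint16_t hw_id = (uint16_t)did; /* narrow at use site */

                    (void)hw_id;
                    visited++;
            }
            printf("visited %lu domain ids\n", visited);
            return 0;
    }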
index ba764a0835d3cd881981f0e049956e6f71a62788..e23001bfcfee996f8ab18f1695d1e3476bc6b9c2 100644 (file)
@@ -420,8 +420,10 @@ retry:
 
                /* Try replenishing IOVAs by flushing rcache. */
                flushed_rcache = true;
+               preempt_disable();
                for_each_online_cpu(cpu)
                        free_cpu_cached_iovas(cpu, iovad);
+               preempt_enable();
                goto retry;
        }
 
@@ -749,7 +751,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
        bool can_insert = false;
        unsigned long flags;
 
-       cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+       cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
        spin_lock_irqsave(&cpu_rcache->lock, flags);
 
        if (!iova_magazine_full(cpu_rcache->loaded)) {
@@ -779,6 +781,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
                iova_magazine_push(cpu_rcache->loaded, iova_pfn);
 
        spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+       put_cpu_ptr(rcache->cpu_rcaches);
 
        if (mag_to_free) {
                iova_magazine_free_pfns(mag_to_free, iovad);
@@ -812,7 +815,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
        bool has_pfn = false;
        unsigned long flags;
 
-       cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+       cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
        spin_lock_irqsave(&cpu_rcache->lock, flags);
 
        if (!iova_magazine_empty(cpu_rcache->loaded)) {
@@ -834,6 +837,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
                iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
 
        spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+       put_cpu_ptr(rcache->cpu_rcaches);
 
        return iova_pfn;
 }
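[Editor's note] Switching from this_cpu_ptr() to the get_cpu_ptr()/put_cpu_ptr() pair disables preemption across the per-CPU rcache access, so the task cannot migrate to another CPU between fetching the per-CPU pointer and using it (and DEBUG_PREEMPT stops warning). A kernel-style sketch only, not a standalone program; it assumes <linux/percpu.h>, <linux/spinlock.h>, and an invented per-CPU variable:

    struct stats {
            spinlock_t lock;
            unsigned long hits;
    };

    static DEFINE_PER_CPU(struct stats, my_pcpu);

    static void bump_hits(void)
    {
            struct stats *s;
            unsigned long flags;

            s = get_cpu_ptr(&my_pcpu);  /* this_cpu_ptr() + preempt off */
            spin_lock_irqsave(&s->lock, flags);
            s->hits++;
            spin_unlock_irqrestore(&s->lock, flags);
            put_cpu_ptr(&my_pcpu);      /* preemption back on */
    }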
index 8a4adbeb2b8cd694a08e18bfb3d2df8d31b7e937..70ed1d0151b8b271018d8fcf7465342843f38467 100644 (file)
@@ -718,7 +718,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
 
        spin_lock_irqsave(&gic_lock, flags);
        gic_map_to_pin(intr, gic_cpu_pin);
-       gic_map_to_vpe(intr, vpe);
+       gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
        for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
                clear_bit(intr, pcpu_masks[i].pcpu_mask);
        set_bit(intr, pcpu_masks[vpe].pcpu_mask);
@@ -959,7 +959,7 @@ int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
        switch (bus_token) {
        case DOMAIN_BUS_IPI:
                is_ipi = d->bus_token == bus_token;
-               return to_of_node(d->fwnode) == node && is_ipi;
+               return (!node || to_of_node(d->fwnode) == node) && is_ipi;
                break;
        default:
                return 0;
index beb2841ceae58bbb69d65d99bf3697ee6f5dc686..3f1ab4986cfc507d2cf70a451cbbf2962deac2b8 100644 (file)
@@ -779,11 +779,31 @@ static const struct v4l2_dv_timings_cap adv76xx_timings_cap_digital = {
                        V4L2_DV_BT_CAP_CUSTOM)
 };
 
-static inline const struct v4l2_dv_timings_cap *
-adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd)
+/*
+ * Return the DV timings capabilities for the requested sink pad. As a special
+ * case, pad value -1 returns the capabilities for the currently selected input.
+ */
+static const struct v4l2_dv_timings_cap *
+adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd, int pad)
 {
-       return is_digital_input(sd) ? &adv76xx_timings_cap_digital :
-                                     &adv7604_timings_cap_analog;
+       if (pad == -1) {
+               struct adv76xx_state *state = to_state(sd);
+
+               pad = state->selected_input;
+       }
+
+       switch (pad) {
+       case ADV76XX_PAD_HDMI_PORT_A:
+       case ADV7604_PAD_HDMI_PORT_B:
+       case ADV7604_PAD_HDMI_PORT_C:
+       case ADV7604_PAD_HDMI_PORT_D:
+               return &adv76xx_timings_cap_digital;
+
+       case ADV7604_PAD_VGA_RGB:
+       case ADV7604_PAD_VGA_COMP:
+       default:
+               return &adv7604_timings_cap_analog;
+       }
 }
 
 
@@ -1329,7 +1349,7 @@ static int stdi2dv_timings(struct v4l2_subdev *sd,
                const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
 
                if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i],
-                                          adv76xx_get_dv_timings_cap(sd),
+                                          adv76xx_get_dv_timings_cap(sd, -1),
                                           adv76xx_check_dv_timings, NULL))
                        continue;
                if (vtotal(bt) != stdi->lcf + 1)
@@ -1430,18 +1450,22 @@ static int adv76xx_enum_dv_timings(struct v4l2_subdev *sd,
                return -EINVAL;
 
        return v4l2_enum_dv_timings_cap(timings,
-               adv76xx_get_dv_timings_cap(sd), adv76xx_check_dv_timings, NULL);
+               adv76xx_get_dv_timings_cap(sd, timings->pad),
+               adv76xx_check_dv_timings, NULL);
 }
 
 static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
                        struct v4l2_dv_timings_cap *cap)
 {
        struct adv76xx_state *state = to_state(sd);
+       unsigned int pad = cap->pad;
 
        if (cap->pad >= state->source_pad)
                return -EINVAL;
 
-       *cap = *adv76xx_get_dv_timings_cap(sd);
+       *cap = *adv76xx_get_dv_timings_cap(sd, pad);
+       cap->pad = pad;
+
        return 0;
 }
 
@@ -1450,9 +1474,9 @@ static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
 static void adv76xx_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
                struct v4l2_dv_timings *timings)
 {
-       v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd),
-                       is_digital_input(sd) ? 250000 : 1000000,
-                       adv76xx_check_dv_timings, NULL);
+       v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd, -1),
+                                is_digital_input(sd) ? 250000 : 1000000,
+                                adv76xx_check_dv_timings, NULL);
 }
 
 static unsigned int adv7604_read_hdmi_pixelclock(struct v4l2_subdev *sd)
@@ -1620,7 +1644,7 @@ static int adv76xx_s_dv_timings(struct v4l2_subdev *sd,
 
        bt = &timings->bt;
 
-       if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd),
+       if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd, -1),
                                   adv76xx_check_dv_timings, NULL))
                return -ERANGE;
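[Editor's note] adv76xx_get_dv_timings_cap() now takes the sink pad explicitly, with -1 as a documented sentinel meaning "the currently selected input": callers acting on the active input pass -1, while the pad-aware ioctls forward the user-supplied pad. A small sketch of the sentinel-argument pattern (table and names illustrative):

    #include <stdio.h>

    static const char *caps[] = { "digital", "digital", "analog" };
    static int selected_input = 2;

    /* pad == -1 means "use the currently selected input" */
    static const char *get_cap(int pad)
    {
            if (pad == -1)
                    pad = selected_input;
            if (pad < 0 || pad >= (int)(sizeof(caps) / sizeof(caps[0])))
                    return "analog";        /* conservative default */
            return caps[pad];
    }

    int main(void)
    {
            printf("%s %s\n", get_cap(0), get_cap(-1)); /* digital analog */
            return 0;
    }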
 
index 87c12930416f565a118c145501e54df1a6b13cc7..92d9d4214c3adb326dc1a23d0c2c43840172611c 100644 (file)
@@ -1072,7 +1072,7 @@ static int airspy_probe(struct usb_interface *intf,
        if (ret) {
                dev_err(s->dev, "Failed to register as video device (%d)\n",
                                ret);
-               goto err_unregister_v4l2_dev;
+               goto err_free_controls;
        }
        dev_info(s->dev, "Registered as %s\n",
                        video_device_node_name(&s->vdev));
@@ -1081,7 +1081,6 @@ static int airspy_probe(struct usb_interface *intf,
 
 err_free_controls:
        v4l2_ctrl_handler_free(&s->hdl);
-err_unregister_v4l2_dev:
        v4l2_device_unregister(&s->v4l2_dev);
 err_free_mem:
        kfree(s);
index 28e5be2c2eef22f75a1a902131ee987bb927ad0d..528390f33b5336842491d039658a87f11b3a75f2 100644 (file)
@@ -2171,7 +2171,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
         * The determine_valid_ioctls() call already should ensure
         * that this can never happen, but just in case...
         */
-       if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_cropcap))
+       if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_g_selection))
                return -ENOTTY;
 
        if (ops->vidioc_cropcap)
index 199d261990be58c6c7ebaf9f783000d03d51098b..f32fbb8e8129e0b013b766c98457446d9588117d 100644 (file)
@@ -203,6 +203,7 @@ static int max77620_get_fps_period_reg_value(struct max77620_chip *chip,
                break;
        case MAX77620:
                fps_min_period = MAX77620_FPS_PERIOD_MIN_US;
+               break;
        default:
                return -EINVAL;
        }
@@ -236,6 +237,7 @@ static int max77620_config_fps(struct max77620_chip *chip,
                break;
        case MAX77620:
                fps_max_period = MAX77620_FPS_PERIOD_MAX_US;
+               break;
        default:
                return -EINVAL;
        }
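[Editor's note] Both max77620 hunks add a break that was missing from the case MAX77620: arm; without it, control fell through into default: and the function returned -EINVAL even for a supported chip. A runnable reduction of the bug (enum and period values illustrative):

    #include <stdio.h>

    enum chip { MAX20024, MAX77620 };

    static int min_period(enum chip id)
    {
            int period;

            switch (id) {
            case MAX20024:
                    period = 20;
                    break;
            case MAX77620:
                    period = 40;
                    break;  /* without this, falls into default: -EINVAL */
            default:
                    return -22;
            }
            return period;
    }

    int main(void)
    {
            printf("%d\n", min_period(MAX77620));   /* 40, not -22 */
            return 0;
    }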
index e62fde3ac431c111ec945d4aba9c877f6308d81d..c5472e3c923126097fd93f2abf31402a1717c228 100644 (file)
@@ -355,8 +355,10 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
                goto idata_err;
        }
 
-       if (!idata->buf_bytes)
+       if (!idata->buf_bytes) {
+               idata->buf = NULL;
                return idata;
+       }
 
        idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
        if (!idata->buf) {
@@ -1786,8 +1788,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
 
        packed_cmd_hdr = packed->cmd_hdr;
        memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
-       packed_cmd_hdr[0] = (packed->nr_entries << 16) |
-               (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+       packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
+               (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
        hdr_blocks = mmc_large_sector(card) ? 8 : 1;
 
        /*
@@ -1801,14 +1803,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
                        ((brq->data.blocks * brq->data.blksz) >=
                         card->ext_csd.data_tag_unit_size);
                /* Argument of CMD23 */
-               packed_cmd_hdr[(i * 2)] =
+               packed_cmd_hdr[(i * 2)] = cpu_to_le32(
                        (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
                        (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
-                       blk_rq_sectors(prq);
+                       blk_rq_sectors(prq));
                /* Argument of CMD18 or CMD25 */
-               packed_cmd_hdr[((i * 2)) + 1] =
+               packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
                        mmc_card_blockaddr(card) ?
-                       blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+                       blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
                packed->blocks += blk_rq_sectors(prq);
                i++;
        }
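[Editor's note] The eMMC packed-command header is little-endian on the wire, so each 32-bit word must go through cpu_to_le32() before being handed to the card; without the conversion, big-endian hosts wrote byte-swapped headers. A portable sketch of explicit little-endian packing, the userspace analogue of cpu_to_le32 (field values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Store a host-order u32 as little-endian bytes on any host. */
    static void put_le32(uint8_t *out, uint32_t v)
    {
            out[0] = v & 0xff;
            out[1] = (v >> 8) & 0xff;
            out[2] = (v >> 16) & 0xff;
            out[3] = (v >> 24) & 0xff;
    }

    int main(void)
    {
            uint8_t hdr[4];
            uint32_t word = (8u << 16) | (0x02u << 8) | 0x01u;
                            /* entries << 16 | cmd << 8 | version */

            put_le32(hdr, word);
            printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
            return 0;
    }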
index 86fac3e8683378acde59d1da87a0fad3878bf481..c763b404510f3a29d3864290297b22f596701cc3 100644 (file)
@@ -789,14 +789,16 @@ static int pxamci_probe(struct platform_device *pdev)
                gpio_direction_output(gpio_power,
                                      host->pdata->gpio_power_invert);
        }
-       if (gpio_is_valid(gpio_ro))
+       if (gpio_is_valid(gpio_ro)) {
                ret = mmc_gpio_request_ro(mmc, gpio_ro);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
-               goto out;
-       } else {
-               mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
-                       0 : MMC_CAP2_RO_ACTIVE_HIGH;
+               if (ret) {
+                       dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n",
+                               gpio_ro);
+                       goto out;
+               } else {
+                       mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+                               0 : MMC_CAP2_RO_ACTIVE_HIGH;
+               }
        }
 
        if (gpio_is_valid(gpio_cd))
index 08e158895635cddda0bbea4e6973cfb8dd727e74..a136da8df6fe897d4f908d2723d4a95d8b152933 100644 (file)
@@ -1657,8 +1657,11 @@ static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
 
        /* detect availability of ELM module. Won't be present pre-OMAP4 */
        info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
-       if (!info->elm_of_node)
-               dev_dbg(dev, "ti,elm-id not in DT\n");
+       if (!info->elm_of_node) {
+               info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
+               if (!info->elm_of_node)
+                       dev_dbg(dev, "ti,elm-id not in DT\n");
+       }
 
        /* select ecc-scheme for NAND */
        if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
index b9304a295f8648286c1d19e468c30b44084a4241..edc70ffad6607ac06d0a40b48316bef554c5f4c2 100644 (file)
@@ -101,11 +101,14 @@ enum ad_link_speed_type {
 #define MAC_ADDRESS_EQUAL(A, B)        \
        ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
 
-static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
+static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
+       0, 0, 0, 0, 0, 0
+};
 static u16 ad_ticks_per_sec;
 static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
 
-static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
+       MULTICAST_LACPDU_ADDR;
 
 /* ================= main 802.3ad protocol functions ================== */
 static int ad_lacpdu_send(struct port *port);
@@ -657,6 +660,20 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
        }
 }
 
+static int __agg_active_ports(struct aggregator *agg)
+{
+       struct port *port;
+       int active = 0;
+
+       for (port = agg->lag_ports; port;
+            port = port->next_port_in_aggregator) {
+               if (port->is_enabled)
+                       active++;
+       }
+
+       return active;
+}
+
 /**
  * __get_agg_bandwidth - get the total bandwidth of an aggregator
  * @aggregator: the aggregator we're looking at
@@ -664,39 +681,40 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
  */
 static u32 __get_agg_bandwidth(struct aggregator *aggregator)
 {
+       int nports = __agg_active_ports(aggregator);
        u32 bandwidth = 0;
 
-       if (aggregator->num_of_ports) {
+       if (nports) {
                switch (__get_link_speed(aggregator->lag_ports)) {
                case AD_LINK_SPEED_1MBPS:
-                       bandwidth = aggregator->num_of_ports;
+                       bandwidth = nports;
                        break;
                case AD_LINK_SPEED_10MBPS:
-                       bandwidth = aggregator->num_of_ports * 10;
+                       bandwidth = nports * 10;
                        break;
                case AD_LINK_SPEED_100MBPS:
-                       bandwidth = aggregator->num_of_ports * 100;
+                       bandwidth = nports * 100;
                        break;
                case AD_LINK_SPEED_1000MBPS:
-                       bandwidth = aggregator->num_of_ports * 1000;
+                       bandwidth = nports * 1000;
                        break;
                case AD_LINK_SPEED_2500MBPS:
-                       bandwidth = aggregator->num_of_ports * 2500;
+                       bandwidth = nports * 2500;
                        break;
                case AD_LINK_SPEED_10000MBPS:
-                       bandwidth = aggregator->num_of_ports * 10000;
+                       bandwidth = nports * 10000;
                        break;
                case AD_LINK_SPEED_20000MBPS:
-                       bandwidth = aggregator->num_of_ports * 20000;
+                       bandwidth = nports * 20000;
                        break;
                case AD_LINK_SPEED_40000MBPS:
-                       bandwidth = aggregator->num_of_ports * 40000;
+                       bandwidth = nports * 40000;
                        break;
                case AD_LINK_SPEED_56000MBPS:
-                       bandwidth = aggregator->num_of_ports * 56000;
+                       bandwidth = nports * 56000;
                        break;
                case AD_LINK_SPEED_100000MBPS:
-                       bandwidth = aggregator->num_of_ports * 100000;
+                       bandwidth = nports * 100000;
                        break;
                default:
                        bandwidth = 0; /* to silence the compiler */
@@ -1530,10 +1548,10 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
 
        switch (__get_agg_selection_mode(curr->lag_ports)) {
        case BOND_AD_COUNT:
-               if (curr->num_of_ports > best->num_of_ports)
+               if (__agg_active_ports(curr) > __agg_active_ports(best))
                        return curr;
 
-               if (curr->num_of_ports < best->num_of_ports)
+               if (__agg_active_ports(curr) < __agg_active_ports(best))
                        return best;
 
                /*FALLTHROUGH*/
@@ -1561,8 +1579,14 @@ static int agg_device_up(const struct aggregator *agg)
        if (!port)
                return 0;
 
-       return netif_running(port->slave->dev) &&
-              netif_carrier_ok(port->slave->dev);
+       for (port = agg->lag_ports; port;
+            port = port->next_port_in_aggregator) {
+               if (netif_running(port->slave->dev) &&
+                   netif_carrier_ok(port->slave->dev))
+                       return 1;
+       }
+
+       return 0;
 }
 
 /**
@@ -1610,7 +1634,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
 
                agg->is_active = 0;
 
-               if (agg->num_of_ports && agg_device_up(agg))
+               if (__agg_active_ports(agg) && agg_device_up(agg))
                        best = ad_agg_selection_test(best, agg);
        }
 
@@ -1622,7 +1646,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
                 * answering partner.
                 */
                if (active && active->lag_ports &&
-                   active->lag_ports->is_enabled &&
+                   __agg_active_ports(active) &&
                    (__agg_has_partner(active) ||
                     (!__agg_has_partner(active) &&
                     !__agg_has_partner(best)))) {
@@ -1718,7 +1742,7 @@ static void ad_clear_agg(struct aggregator *aggregator)
                aggregator->is_individual = false;
                aggregator->actor_admin_aggregator_key = 0;
                aggregator->actor_oper_aggregator_key = 0;
-               aggregator->partner_system = null_mac_addr;
+               eth_zero_addr(aggregator->partner_system.mac_addr_value);
                aggregator->partner_system_priority = 0;
                aggregator->partner_oper_aggregator_key = 0;
                aggregator->receive_state = 0;
@@ -1740,7 +1764,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
        if (aggregator) {
                ad_clear_agg(aggregator);
 
-               aggregator->aggregator_mac_address = null_mac_addr;
+               eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
                aggregator->aggregator_identifier = 0;
                aggregator->slave = NULL;
        }
@@ -2133,7 +2157,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
                                else
                                        temp_aggregator->lag_ports = temp_port->next_port_in_aggregator;
                                temp_aggregator->num_of_ports--;
-                               if (temp_aggregator->num_of_ports == 0) {
+                               if (__agg_active_ports(temp_aggregator) == 0) {
                                        select_new_active_agg = temp_aggregator->is_active;
                                        ad_clear_agg(temp_aggregator);
                                        if (select_new_active_agg) {
@@ -2432,7 +2456,9 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
  */
 void bond_3ad_handle_link_change(struct slave *slave, char link)
 {
+       struct aggregator *agg;
        struct port *port;
+       bool dummy;
 
        port = &(SLAVE_AD_INFO(slave)->port);
 
@@ -2459,6 +2485,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
                port->is_enabled = false;
                ad_update_actor_keys(port, true);
        }
+       agg = __get_first_agg(port);
+       ad_agg_selection_logic(agg, &dummy);
+
        netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
                   port->actor_port_number,
                   link == BOND_LINK_UP ? "UP" : "DOWN");
@@ -2499,7 +2528,7 @@ int bond_3ad_set_carrier(struct bonding *bond)
        active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
        if (active) {
                /* are enough slaves available to consider link up? */
-               if (active->num_of_ports < bond->params.min_links) {
+               if (__agg_active_ports(active) < bond->params.min_links) {
                        if (netif_carrier_ok(bond->dev)) {
                                netif_carrier_off(bond->dev);
                                goto out;
index c5ac160a8ae954d1104084c3d94795725cc261ae..551f0f8dead3945cf1516aeff01afe6c74117072 100644 (file)
 
 
 
-#ifndef __long_aligned
-#define __long_aligned __attribute__((aligned((sizeof(long)))))
-#endif
-static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff
 };
-static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
        0x33, 0x33, 0x00, 0x00, 0x00, 0x01
 };
 static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
index 941ec99cd3b69b6c9d3596e5b65b6aa89bc960c7..a2afa3be17a4bcc4e662a17e49f5c6bc31bce4d3 100644 (file)
@@ -1584,6 +1584,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 
        /* check for initial state */
+       new_slave->link = BOND_LINK_NOCHANGE;
        if (bond->params.miimon) {
                if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
                        if (bond->params.updelay) {
index db760e84119fcb970b7b34f7c4fac92b1acfed52..b8df0f5e8c25ae35b1ce368b04536c40761a1c96 100644 (file)
@@ -446,7 +446,11 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
        if (err < 0)
                return err;
 
-       return register_netdevice(bond_dev);
+       err = register_netdevice(bond_dev);
+
+       netif_carrier_off(bond_dev);
+
+       return err;
 }
 
 static size_t bond_get_size(const struct net_device *bond_dev)
index 8b3275d7792acbab2d0ba9efe9fe3e2a6b283231..8f5e93cb79752703c141704011490535664b941e 100644 (file)
@@ -712,9 +712,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
 
        /* upper group completed, look again in lower */
        if (priv->rx_next > get_mb_rx_low_last(priv) &&
-           quota > 0 && mb > get_mb_rx_last(priv)) {
+           mb > get_mb_rx_last(priv)) {
                priv->rx_next = get_mb_rx_first(priv);
-               goto again;
+               if (quota > 0)
+                       goto again;
        }
 
        return received;
index f91b094288dad3d86064f24a33a97ad58756f3ca..e3dccd3200d5d834f13ad036c290c51e3091052e 100644 (file)
@@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
 
        priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
 
-       for (i = 0; i < frame->can_dlc; i += 2) {
-               priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
-                               frame->data[i] | (frame->data[i + 1] << 8));
+       if (priv->type == BOSCH_D_CAN) {
+               u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+               for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+                       data = (u32)frame->data[i];
+                       data |= (u32)frame->data[i + 1] << 8;
+                       data |= (u32)frame->data[i + 2] << 16;
+                       data |= (u32)frame->data[i + 3] << 24;
+                       priv->write_reg32(priv, dreg, data);
+               }
+       } else {
+               for (i = 0; i < frame->can_dlc; i += 2) {
+                       priv->write_reg(priv,
+                                       C_CAN_IFACE(DATA1_REG, iface) + i / 2,
+                                       frame->data[i] |
+                                       (frame->data[i + 1] << 8));
+               }
        }
 }
 
@@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
        } else {
                int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
 
-               for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
-                       data = priv->read_reg(priv, dreg);
-                       frame->data[i] = data;
-                       frame->data[i + 1] = data >> 8;
+               if (priv->type == BOSCH_D_CAN) {
+                       for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+                               data = priv->read_reg32(priv, dreg);
+                               frame->data[i] = data;
+                               frame->data[i + 1] = data >> 8;
+                               frame->data[i + 2] = data >> 16;
+                               frame->data[i + 3] = data >> 24;
+                       }
+               } else {
+                       for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
+                               data = priv->read_reg(priv, dreg);
+                               frame->data[i] = data;
+                               frame->data[i + 1] = data >> 8;
+                       }
                }
        }
 
index 910c12e2638e3615084ca9f6a148030c8f2358e1..ad535a854e5cf28309a71547b49e411db85b385b 100644 (file)
@@ -798,6 +798,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
         * - control mode with CAN_CTRLMODE_FD set
         */
 
+       if (!data)
+               return 0;
+
        if (data[IFLA_CAN_CTRLMODE]) {
                struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
 
@@ -1008,6 +1011,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
        return -EOPNOTSUPP;
 }
 
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+       return;
+}
+
 static struct rtnl_link_ops can_link_ops __read_mostly = {
        .kind           = "can",
        .maxtype        = IFLA_CAN_MAX,
@@ -1016,6 +1024,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
        .validate       = can_validate,
        .newlink        = can_newlink,
        .changelink     = can_changelink,
+       .dellink        = can_dellink,
        .get_size       = can_get_size,
        .fill_info      = can_fill_info,
        .get_xstats_size = can_get_xstats_size,
index bcb272f6c68a9ac242964af7901bf6597a499306..8483a40e7e9ef52327e15e03fa4100c73f68a53f 100644 (file)
@@ -16,7 +16,8 @@ config CAN_ESD_USB2
 config CAN_GS_USB
        tristate "Geschwister Schneider UG interfaces"
        ---help---
-         This driver supports the Geschwister Schneider USB/CAN devices.
+         This driver supports the Geschwister Schneider USB/CAN devices
+         and bytewerk.org candleLight USB/CAN interfaces.
          If unsure choose N,
          choose Y for built in support,
          M to compile as module (module will be named: gs_usb).
@@ -46,6 +47,8 @@ config CAN_KVASER_USB
            - Kvaser USBcan R
            - Kvaser Leaf Light v2
            - Kvaser Mini PCI Express HS
+           - Kvaser Mini PCI Express 2xHS
+           - Kvaser USBcan Light 2xHS
            - Kvaser USBcan II HS/HS
            - Kvaser USBcan II HS/LS
            - Kvaser USBcan Rugged ("USBcan Rev B")
index 1556d428623531026ef58fe15260ff2e88a04372..acb0c8490673a081fca9738659235f0673e5167d 100644 (file)
@@ -1,7 +1,9 @@
-/* CAN driver for Geschwister Schneider USB/CAN devices.
+/* CAN driver for Geschwister Schneider USB/CAN devices
+ * and bytewerk.org candleLight USB CAN interfaces.
  *
- * Copyright (C) 2013 Geschwister Schneider Technologie-,
+ * Copyright (C) 2013-2016 Geschwister Schneider Technologie-,
  * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
+ * Copyright (C) 2016 Hubert Denkmair
  *
  * Many thanks to all socketcan devs!
  *
@@ -29,6 +31,9 @@
 #define USB_GSUSB_1_VENDOR_ID      0x1d50
 #define USB_GSUSB_1_PRODUCT_ID     0x606f
 
+#define USB_CANDLELIGHT_VENDOR_ID  0x1209
+#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
+
 #define GSUSB_ENDPOINT_IN          1
 #define GSUSB_ENDPOINT_OUT         2
 
@@ -952,6 +957,8 @@ static void gs_usb_disconnect(struct usb_interface *intf)
 static const struct usb_device_id gs_usb_table[] = {
        { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
                                      USB_GSUSB_1_PRODUCT_ID, 0) },
+       { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
+                                     USB_CANDLELIGHT_PRODUCT_ID, 0) },
        {} /* Terminating entry */
 };
 
@@ -969,5 +976,6 @@ module_usb_driver(gs_usb_driver);
 MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
 MODULE_DESCRIPTION(
 "Socket CAN device driver for Geschwister Schneider Technologie-, "
-"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
+"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n"
+"and bytewerk.org candleLight USB CAN interfaces.");
 MODULE_LICENSE("GPL v2");
index 022bfa13ebfa0c85491cfc2cb17a3437c01f0205..6f1f3b675ff553a96d4342b619e34a606fa9be29 100644 (file)
 #define USB_CAN_R_PRODUCT_ID           39
 #define USB_LEAF_LITE_V2_PRODUCT_ID    288
 #define USB_MINI_PCIE_HS_PRODUCT_ID    289
+#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
+#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID        291
+#define USB_MINI_PCIE_2HS_PRODUCT_ID   292
 
 static inline bool kvaser_is_leaf(const struct usb_device_id *id)
 {
        return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
-              id->idProduct <= USB_MINI_PCIE_HS_PRODUCT_ID;
+              id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID;
 }
 
 /* Kvaser USBCan-II devices */
@@ -537,6 +540,9 @@ static const struct usb_device_id kvaser_usb_table[] = {
                .driver_info = KVASER_HAS_TXRX_ERRORS },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
 
        /* USBCANII family IDs */
        { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
index 30defe6c81f22d6463c4df091a421014392f6ff2..821d86c38ab214a9c450f3079398b1d3a4af2e24 100644 (file)
@@ -3851,7 +3851,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
        unsigned long flags;
 
        /* If the device is closed, ignore the timeout */
-       if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
+       if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
                return;
 
        /* Any nonrecoverable hardware error?
index e0fb0f1122db42553c0ed4a907d89a1e03e1f209..20760e10211a48ba15daa72a5c4322c90ceb0157 100644 (file)
@@ -509,8 +509,8 @@ static int au1000_mii_probe(struct net_device *dev)
         * on the current MAC's MII bus
         */
        for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
-               if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) {
-                       phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
+               if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
+                       phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
                        if (!aup->phy_search_highest_addr)
                                /* break out with first one found */
                                break;
index d02c4240b7df1fab72f1ff102d43361957d74078..8fc93c5f6abca337e01f6939678cadd24f36b0f1 100644 (file)
@@ -96,10 +96,6 @@ struct alx_priv {
        unsigned int rx_ringsz;
        unsigned int rxbuf_size;
 
-       struct page  *rx_page;
-       unsigned int rx_page_offset;
-       unsigned int rx_frag_size;
-
        struct napi_struct napi;
        struct alx_tx_queue txq;
        struct alx_rx_queue rxq;
index c98acdc0d14f5304918d922372b635bf38c6f666..e708e360a9e3919df7b4e7b6e8500e5c27d24f55 100644 (file)
@@ -70,35 +70,6 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
        }
 }
 
-static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
-{
-       struct sk_buff *skb;
-       struct page *page;
-
-       if (alx->rx_frag_size > PAGE_SIZE)
-               return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
-
-       page = alx->rx_page;
-       if (!page) {
-               alx->rx_page = page = alloc_page(gfp);
-               if (unlikely(!page))
-                       return NULL;
-               alx->rx_page_offset = 0;
-       }
-
-       skb = build_skb(page_address(page) + alx->rx_page_offset,
-                       alx->rx_frag_size);
-       if (likely(skb)) {
-               alx->rx_page_offset += alx->rx_frag_size;
-               if (alx->rx_page_offset >= PAGE_SIZE)
-                       alx->rx_page = NULL;
-               else
-                       get_page(page);
-       }
-       return skb;
-}
-
-
 static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 {
        struct alx_rx_queue *rxq = &alx->rxq;
@@ -115,9 +86,22 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
        while (!cur_buf->skb && next != rxq->read_idx) {
                struct alx_rfd *rfd = &rxq->rfd[cur];
 
-               skb = alx_alloc_skb(alx, gfp);
+               /*
+                * When the DMA RX address is set to something like
+                * 0x....fc0, it is very likely to trigger a DMA RFD
+                * overflow.
+                *
+                * To work around it, allocate the rx skb with 64
+                * bytes of extra space and offset the address
+                * whenever 0x....fc0 is detected.
+                */
+               skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
                if (!skb)
                        break;
+
+               if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+                       skb_reserve(skb, 64);
+
                dma = dma_map_single(&alx->hw.pdev->dev,
                                     skb->data, alx->rxbuf_size,
                                     DMA_FROM_DEVICE);
@@ -153,7 +137,6 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
                alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
        }
 
-
        return count;
 }
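[Editor's note] The refill path above over-allocates each receive buffer by 64 bytes and nudges the data pointer whenever its low 12 bits land on the problematic 0xfc0 boundary. The check reduces to simple pointer masking, sketched below (the 0xfc0 trigger value comes from the hunk; the buffer handling is illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAD 64

    int main(void)
    {
            size_t want = 1536;
            uint8_t *buf = malloc(want + PAD);  /* 64 spare bytes */
            uint8_t *data = buf;

            if (!buf)
                    return 1;

            /* Step past the alignment the hardware mis-handles. */
            if (((uintptr_t)data & 0xfff) == 0xfc0)
                    data += PAD;

            printf("low bits now 0x%03lx\n",
                   (unsigned long)((uintptr_t)data & 0xfff));
            free(buf);
            return 0;
    }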
 
@@ -622,11 +605,6 @@ static void alx_free_rings(struct alx_priv *alx)
        kfree(alx->txq.bufs);
        kfree(alx->rxq.bufs);
 
-       if (alx->rx_page) {
-               put_page(alx->rx_page);
-               alx->rx_page = NULL;
-       }
-
        dma_free_coherent(&alx->hw.pdev->dev,
                          alx->descmem.size,
                          alx->descmem.virt,
@@ -681,7 +659,6 @@ static int alx_request_irq(struct alx_priv *alx)
                                  alx->dev->name, alx);
                if (!err)
                        goto out;
-
                /* fall back to legacy interrupt */
                pci_disable_msi(alx->hw.pdev);
        }
@@ -725,7 +702,6 @@ static int alx_init_sw(struct alx_priv *alx)
        struct pci_dev *pdev = alx->hw.pdev;
        struct alx_hw *hw = &alx->hw;
        int err;
-       unsigned int head_size;
 
        err = alx_identify_hw(alx);
        if (err) {
@@ -741,12 +717,7 @@ static int alx_init_sw(struct alx_priv *alx)
 
        hw->smb_timer = 400;
        hw->mtu = alx->dev->mtu;
-
        alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
-       head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
-                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-       alx->rx_frag_size = roundup_pow_of_two(head_size);
-
        alx->tx_ringsz = 256;
        alx->rx_ringsz = 512;
        hw->imt = 200;
@@ -848,7 +819,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
 {
        struct alx_priv *alx = netdev_priv(netdev);
        int max_frame = ALX_MAX_FRAME_LEN(mtu);
-       unsigned int head_size;
 
        if ((max_frame < ALX_MIN_FRAME_SIZE) ||
            (max_frame > ALX_MAX_FRAME_SIZE))
@@ -860,9 +830,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
        netdev->mtu = mtu;
        alx->hw.mtu = mtu;
        alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
-       head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
-                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-       alx->rx_frag_size = roundup_pow_of_two(head_size);
        netdev_update_features(netdev);
        if (netif_running(netdev))
                alx_reinit(alx);
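
The alx change above replaces the page-fragment allocator with plain skb allocation plus an alignment workaround: every rx buffer gets 64 bytes of extra space, and any buffer whose DMA address ends in 0xfc0 is offset past the problematic boundary. A minimal userspace sketch of that check (plain C model, not driver code; the 0xfc0 mask and 64-byte pad are taken from the hunk above):

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the alx workaround: if the low 12 bits of the buffer
     * address are 0xfc0, skip 64 bytes so the RFD never starts on the
     * problematic boundary.
     */
    static unsigned int rx_buf_offset(uintptr_t addr)
    {
            return ((addr & 0xfff) == 0xfc0) ? 64 : 0;
    }

    int main(void)
    {
            uintptr_t good = 0x12345000, bad = 0x12345fc0;

            printf("offsets: %u %u\n", rx_buf_offset(good),
                   rx_buf_offset(bad));             /* 0 64 */
            return 0;
    }
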
index 08a23e6b60e947894783f2115c2fe16abece84bc..1a3555d03a96a823e4936c06a2e9ce64d27ae851 100644 (file)
@@ -259,6 +259,7 @@ static void nb8800_receive(struct net_device *dev, unsigned int i,
                if (err) {
                        netdev_err(dev, "rx buffer allocation failed\n");
                        dev->stats.rx_dropped++;
+                       dev_kfree_skb(skb);
                        return;
                }
 
index 543bf38105c9240d9ae374708377755c5e4db9a6..bfa26a2590c979b41bc6c46aee275d730e1ce89d 100644 (file)
@@ -392,7 +392,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
                else
                        p = (char *)priv;
                p += s->stat_offset;
-               data[i] = *(u32 *)p;
+               data[i] = *(unsigned long *)p;
        }
 }
 
index ee5f431ab32af4f2b7c175f836ac10f697016e1e..25bbae5928d43a4d911d0d5852612d8524437cdb 100644 (file)
@@ -231,7 +231,7 @@ err_dma:
        dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
                         DMA_TO_DEVICE);
 
-       while (i > 0) {
+       while (i-- > 0) {
                int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
                struct bgmac_slot_info *slot = &ring->slots[index];
                u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
@@ -267,15 +267,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
        while (ring->start != ring->end) {
                int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
                struct bgmac_slot_info *slot = &ring->slots[slot_idx];
-               u32 ctl1;
+               u32 ctl0, ctl1;
                int len;
 
                if (slot_idx == empty_slot)
                        break;
 
+               ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
                ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
                len = ctl1 & BGMAC_DESC_CTL1_LEN;
-               if (ctl1 & BGMAC_DESC_CTL0_SOF)
+               if (ctl0 & BGMAC_DESC_CTL0_SOF)
                        /* Unmap no longer used buffer */
                        dma_unmap_single(dma_dev, slot->dma_addr, len,
                                         DMA_TO_DEVICE);
@@ -1312,7 +1313,8 @@ static int bgmac_open(struct net_device *net_dev)
 
        phy_start(bgmac->phy_dev);
 
-       netif_carrier_on(net_dev);
+       netif_start_queue(net_dev);
+
        return 0;
 }
 
index a38cb047b54083897fa6e8df5c098c7e1b98d7ac..1b0ae4a72e9ecd81a1d404c5e6edadb3f00ce8cc 100644 (file)
@@ -1591,7 +1591,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
 {
        struct bnxt *bp = netdev_priv(dev);
        u16  start = eeprom->offset, length = eeprom->len;
-       int rc;
+       int rc = 0;
 
        memset(data, 0, eeprom->len);
 
index 8de79ae63231b0ad21e47edb9a150e026adf6bd3..0e7e7da8d201c0b3af122c542ef42467c03cb94c 100644 (file)
@@ -2821,7 +2821,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                if (!g) {
                        netif_info(lio, tx_err, lio->netdev,
                                   "Transmit scatter gather: glist null!\n");
-                       goto lio_xmit_failed;
+                       goto lio_xmit_dma_failed;
                }
 
                cmdsetup.s.gather = 1;
@@ -2892,7 +2892,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
        else
                status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
        if (status == IQ_SEND_FAILED)
-               goto lio_xmit_failed;
+               goto lio_xmit_dma_failed;
 
        netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
 
@@ -2906,12 +2906,13 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        return NETDEV_TX_OK;
 
+lio_xmit_dma_failed:
+       dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+                        ndata.datasize, DMA_TO_DEVICE);
 lio_xmit_failed:
        stats->tx_dropped++;
        netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
                   iq_no, stats->tx_dropped);
-       dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
-                        ndata.datasize, DMA_TO_DEVICE);
        recv_buffer_free(skb);
        return NETDEV_TX_OK;
 }
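
The liquidio fix above splits the error path into two stacked labels so the DMA unmap only runs once the buffer has actually been mapped, while the drop accounting stays shared by both paths. A toy model of that label ordering (plain C, not driver code):

    #include <stdio.h>

    /* Stacked error labels: resources are released only on the paths
     * that acquired them; falling through from the later label to the
     * earlier one keeps the common accounting in one place.
     */
    static int xmit(int map_ok, int send_ok)
    {
            if (!map_ok)
                    goto xmit_failed;       /* nothing mapped yet */
            if (!send_ok)
                    goto xmit_dma_failed;   /* must undo the mapping */
            return 0;

    xmit_dma_failed:
            printf("dma_unmap\n");          /* only on mapped paths */
    xmit_failed:
            printf("drop + free skb\n");    /* common to both paths */
            return 0;
    }

    int main(void)
    {
            xmit(0, 1);     /* drop only        */
            xmit(1, 0);     /* unmap, then drop */
            return 0;
    }
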
index 95f17f8cadacc08637c485e555242e78aafb48c5..16ed20357c5c30d2cbb786cb414a6d4544fecf83 100644 (file)
@@ -499,6 +499,7 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
        u32 rr_quantum;
        u8 sq_idx = sq->sq_num;
        u8 pqs_vnic;
+       int svf;
 
        if (sq->sqs_mode)
                pqs_vnic = nic->pqs_vf[vnic];
@@ -511,10 +512,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
        /* 24 bytes for FCS, IPG and preamble */
        rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
 
-       tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+       if (!sq->sqs_mode) {
+               tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+       } else {
+               for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+                       if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+                               break;
+               }
+               tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
+               tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
+               tl4 += (svf * NIC_TL4_PER_LMAC);
+               tl4 += (bgx * NIC_TL4_PER_BGX);
+       }
        tl4 += sq_idx;
-       if (sq->sqs_mode)
-               tl4 += vnic * 8;
 
        tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
        nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
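
The new TL4 calculation reserves the first block for primary Qsets and then indexes secondary Qsets by LMAC, secondary-VF slot, and BGX. A worked sketch with assumed constant values (the real NIC_TL4_* and MAX_* values live in the thunder nic headers):

    #include <stdio.h>

    /* Assumed values for illustration only. */
    #define TL4_PER_LMAC     64
    #define TL4_PER_BGX      256
    #define MAX_LMAC_PER_BGX 4
    #define MAX_SQS_PER_VF   11

    /* Sketch of the secondary-Qset TL4 indexing from the hunk above. */
    static int tl4_for_sqs(int bgx, int lmac, int svf, int sq_idx)
    {
            int tl4 = MAX_LMAC_PER_BGX * TL4_PER_LMAC; /* skip primary block */

            tl4 += lmac * TL4_PER_LMAC * MAX_SQS_PER_VF;
            tl4 += svf * TL4_PER_LMAC;
            tl4 += bgx * TL4_PER_BGX;
            return tl4 + sq_idx;
    }

    int main(void)
    {
            printf("tl4 = %d\n", tl4_for_sqs(0, 1, 2, 3)); /* 256+704+128+3 */
            return 0;
    }
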
index 3ed21988626b5ffaceb4e72e0276c63cee1c4347..63a39ac97d53beb65e562bba0a9fb1b86f7baac7 100644 (file)
@@ -551,7 +551,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
        }
 
        /* Clear rcvflt bit (latching high) and read it back */
-       bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+       if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+               bgx_reg_modify(bgx, lmacid,
+                              BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
        if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
                dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
                if (bgx->use_training) {
@@ -570,13 +572,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
                return -1;
        }
 
-       /* Wait for MAC RX to be ready */
-       if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
-                        SMU_RX_CTL_STATUS, true)) {
-               dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
-               return -1;
-       }
-
        /* Wait for BGX RX to be idle */
        if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
                dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@@ -589,29 +584,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
                return -1;
        }
 
-       if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
-               dev_err(&bgx->pdev->dev, "Receive fault\n");
-               return -1;
-       }
-
-       /* Receive link is latching low. Force it high and verify it */
-       bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
-       if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
-                        SPU_STATUS1_RCV_LNK, false)) {
-               dev_err(&bgx->pdev->dev, "SPU receive link down\n");
-               return -1;
-       }
-
+       /* Clear receive packet disable */
        cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
        cfg &= ~SPU_MISC_CTL_RX_DIS;
        bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
-       return 0;
+
+       /* Check for MAC RX faults */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+       /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+       cfg &= SMU_RX_CTL_STATUS;
+       if (!cfg)
+               return 0;
+
+       /* Rx local/remote fault seen.
+        * Reinit the lmac to see if the condition recovers.
+        */
+       bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+
+       return -1;
 }
 
 static void bgx_poll_for_link(struct work_struct *work)
 {
        struct lmac *lmac;
-       u64 link;
+       u64 spu_link, smu_link;
 
        lmac = container_of(work, struct lmac, dwork.work);
 
@@ -621,8 +617,11 @@ static void bgx_poll_for_link(struct work_struct *work)
        bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
                     SPU_STATUS1_RCV_LNK, false);
 
-       link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
-       if (link & SPU_STATUS1_RCV_LNK) {
+       spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+       smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+       if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+           !(smu_link & SMU_RX_CTL_STATUS)) {
                lmac->link_up = 1;
                if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
                        lmac->last_speed = 40000;
@@ -636,9 +635,15 @@ static void bgx_poll_for_link(struct work_struct *work)
        }
 
        if (lmac->last_link != lmac->link_up) {
+               if (lmac->link_up) {
+                       if (bgx_xaui_check_link(lmac)) {
+                               /* Errors, clear link_up state */
+                               lmac->link_up = 0;
+                               lmac->last_speed = SPEED_UNKNOWN;
+                               lmac->last_duplex = DUPLEX_UNKNOWN;
+                       }
+               }
                lmac->last_link = lmac->link_up;
-               if (lmac->link_up)
-                       bgx_xaui_check_link(lmac);
        }
 
        queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
@@ -710,7 +715,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
 {
        struct lmac *lmac;
-       u64 cmrx_cfg;
+       u64 cfg;
 
        lmac = &bgx->lmac[lmacid];
        if (lmac->check_link) {
@@ -719,9 +724,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
                destroy_workqueue(lmac->check_link);
        }
 
-       cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
-       cmrx_cfg &= ~(1 << 15);
-       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+       /* Disable packet reception */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+       cfg &= ~CMR_PKT_RX_EN;
+       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+       /* Give the Rx/Tx FIFOs a chance to drain */
+       bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+       bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+       /* Disable packet transmission */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+       cfg &= ~CMR_PKT_TX_EN;
+       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+       /* Disable serdes lanes */
+       if (!lmac->is_sgmii)
+               bgx_reg_modify(bgx, lmacid,
+                              BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+       else
+               bgx_reg_modify(bgx, lmacid,
+                              BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+       /* Disable LMAC */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+       cfg &= ~CMR_EN;
+       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
        bgx_flush_dmac_addrs(bgx, lmacid);
 
        if ((bgx->lmac_type != BGX_MODE_XFI) &&
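
The reworked bgx link poll above only declares the link up when the SPU reports a receive link and the SMU reports no local or remote RX fault, and it re-runs the full check before committing the state change. A compact model of the combined condition (illustrative bit layout, not the real registers):

    #include <stdio.h>

    #define SPU_RCV_LNK  (1u << 0)  /* illustrative bit layout */
    #define SMU_RX_FAULT 0x3u       /* 1 = local, 2 = remote fault */

    /* Link is up only if the SPU sees a receive link AND the SMU
     * reports no RX fault, mirroring the combined test in the hunk.
     */
    static int link_up(unsigned int spu, unsigned int smu)
    {
            return (spu & SPU_RCV_LNK) && !(smu & SMU_RX_FAULT);
    }

    int main(void)
    {
            printf("%d\n", link_up(SPU_RCV_LNK, 0)); /* 1 */
            printf("%d\n", link_up(SPU_RCV_LNK, 2)); /* 0: remote fault */
            return 0;
    }
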
index 149e179363a1e6dd71e0b7cbd00f26a5e5ef0116..42010d2e5ddf4e3d46eab4b2c9cee6caac5e252a 100644 (file)
@@ -41,6 +41,7 @@
 #define BGX_CMRX_RX_STAT10             0xC0
 #define BGX_CMRX_RX_BP_DROP            0xC8
 #define BGX_CMRX_RX_DMAC_CTL           0x0E8
+#define BGX_CMRX_RX_FIFO_LEN           0x108
 #define BGX_CMR_RX_DMACX_CAM           0x200
 #define  RX_DMACX_CAM_EN                       BIT_ULL(48)
 #define  RX_DMACX_CAM_LMACID(x)                        (x << 49)
@@ -50,6 +51,7 @@
 #define BGX_CMR_CHAN_MSK_AND           0x450
 #define BGX_CMR_BIST_STATUS            0x460
 #define BGX_CMR_RX_LMACS               0x468
+#define BGX_CMRX_TX_FIFO_LEN           0x518
 #define BGX_CMRX_TX_STAT0              0x600
 #define BGX_CMRX_TX_STAT1              0x608
 #define BGX_CMRX_TX_STAT2              0x610
index c4b262ca7d43623fba1cef20dfc6c03ebeadfd7f..2accab38632327ae007baf1ef293dca1ef7da7d9 100644 (file)
@@ -36,8 +36,8 @@
 #define __T4FW_VERSION_H__
 
 #define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0E
-#define T4FW_VERSION_MICRO 0x04
+#define T4FW_VERSION_MINOR 0x0F
+#define T4FW_VERSION_MICRO 0x25
 #define T4FW_VERSION_BUILD 0x00
 
 #define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
 #define T4FW_MIN_VERSION_MICRO 0x00
 
 #define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0E
-#define T5FW_VERSION_MICRO 0x04
+#define T5FW_VERSION_MINOR 0x0F
+#define T5FW_VERSION_MICRO 0x25
 #define T5FW_VERSION_BUILD 0x00
 
 #define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
 #define T5FW_MIN_VERSION_MICRO 0x00
 
 #define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0E
-#define T6FW_VERSION_MICRO 0x04
+#define T6FW_VERSION_MINOR 0x0F
+#define T6FW_VERSION_MICRO 0x25
 #define T6FW_VERSION_BUILD 0x00
 
 #define T6FW_MIN_VERSION_MAJOR 0x00
index 4edb98c3c6c70a9c620caa7928e8e5d555db1242..4466a11871109b41861cd1be9ff3dc70d207512f 100644 (file)
@@ -860,6 +860,11 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int entry;
        void *dest;
 
+       if (skb_put_padto(skb, ETHOC_ZLEN)) {
+               dev->stats.tx_errors++;
+               goto out_no_free;
+       }
+
        if (unlikely(skb->len > ETHOC_BUFSIZ)) {
                dev->stats.tx_errors++;
                goto out;
@@ -894,6 +899,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_tx_timestamp(skb);
 out:
        dev_kfree_skb(skb);
+out_no_free:
        return NETDEV_TX_OK;
 }
 
@@ -1086,7 +1092,7 @@ static int ethoc_probe(struct platform_device *pdev)
        if (!priv->iobase) {
                dev_err(&pdev->dev, "cannot remap I/O memory space\n");
                ret = -ENXIO;
-               goto error;
+               goto free;
        }
 
        if (netdev->mem_end) {
@@ -1095,7 +1101,7 @@ static int ethoc_probe(struct platform_device *pdev)
                if (!priv->membase) {
                        dev_err(&pdev->dev, "cannot remap memory space\n");
                        ret = -ENXIO;
-                       goto error;
+                       goto free;
                }
        } else {
                /* Allocate buffer memory */
@@ -1106,7 +1112,7 @@ static int ethoc_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
                                buffer_size);
                        ret = -ENOMEM;
-                       goto error;
+                       goto free;
                }
                netdev->mem_end = netdev->mem_start + buffer_size;
                priv->dma_alloc = buffer_size;
@@ -1120,7 +1126,7 @@ static int ethoc_probe(struct platform_device *pdev)
                128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
        if (num_bd < 4) {
                ret = -ENODEV;
-               goto error;
+               goto free;
        }
        priv->num_bd = num_bd;
        /* num_tx must be a power of two */
@@ -1133,7 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
        priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
        if (!priv->vma) {
                ret = -ENOMEM;
-               goto error;
+               goto free;
        }
 
        /* Allow the platform setup code to pass in a MAC address. */
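
The ethoc fix above pads short frames to the minimum Ethernet length before any accounting; note that skb_put_padto() frees the skb itself on failure, which is why the driver jumps to out_no_free rather than the normal free path. A userspace model of the padding step (the 60-byte minimum is assumed for illustration):

    #include <stdio.h>
    #include <string.h>

    #define ZLEN 60 /* assumed minimum frame length, excluding FCS */

    /* Model of what skb_put_padto() does on success: extend short
     * frames to the minimum length with zero padding.
     */
    static size_t pad_to(unsigned char *buf, size_t len, size_t min)
    {
            if (len < min) {
                    memset(buf + len, 0, min - len);
                    len = min;
            }
            return len;
    }

    int main(void)
    {
            unsigned char frame[ZLEN] = { 0xde, 0xad };

            printf("padded to %zu bytes\n", pad_to(frame, 2, ZLEN));
            return 0;
    }
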
index 06f031715b578c897863c906b07176589b862f07..9b7a3f5a2818f1fdc7dfe183991a2a7d3ab03523 100644 (file)
@@ -285,6 +285,7 @@ static void nps_enet_hw_reset(struct net_device *ndev)
        ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
        nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
        usleep_range(10, 20);
+       ge_rst_value = 0;
        nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
 
        /* Tx fifo reset sequence */
index 864cb21351a410bf1cd7059cf9952a6010784860..88f3c85fb04ada575e367042d79f4b8188e12245 100644 (file)
@@ -75,6 +75,7 @@
 #include <linux/uaccess.h>
 #include <asm/firmware.h>
 #include <linux/seq_file.h>
+#include <linux/workqueue.h>
 
 #include "ibmvnic.h"
 
@@ -89,6 +90,7 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
 static int ibmvnic_remove(struct vio_dev *);
 static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
@@ -469,7 +471,8 @@ static int ibmvnic_open(struct net_device *netdev)
        crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
        ibmvnic_send_crq(adapter, &crq);
 
-       netif_start_queue(netdev);
+       netif_tx_start_all_queues(netdev);
+
        return 0;
 
 bounce_map_failed:
@@ -519,7 +522,7 @@ static int ibmvnic_close(struct net_device *netdev)
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_disable(&adapter->napi[i]);
 
-       netif_stop_queue(netdev);
+       netif_tx_stop_all_queues(netdev);
 
        if (adapter->bounce_buffer) {
                if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
@@ -1212,12 +1215,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
                goto reg_failed;
        }
 
-       scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
-       if (scrq->irq == NO_IRQ) {
-               dev_err(dev, "Error mapping irq\n");
-               goto map_irq_failed;
-       }
-
        scrq->adapter = adapter;
        scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
        scrq->cur = 0;
@@ -1230,12 +1227,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 
        return scrq;
 
-map_irq_failed:
-       do {
-               rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
-                                       adapter->vdev->unit_address,
-                                       scrq->crq_num);
-       } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 reg_failed:
        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
@@ -1256,6 +1247,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
                        if (adapter->tx_scrq[i]) {
                                free_irq(adapter->tx_scrq[i]->irq,
                                         adapter->tx_scrq[i]);
+                               irq_dispose_mapping(adapter->tx_scrq[i]->irq);
                                release_sub_crq_queue(adapter,
                                                      adapter->tx_scrq[i]);
                        }
@@ -1267,6 +1259,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
                        if (adapter->rx_scrq[i]) {
                                free_irq(adapter->rx_scrq[i]->irq,
                                         adapter->rx_scrq[i]);
+                               irq_dispose_mapping(adapter->rx_scrq[i]->irq);
                                release_sub_crq_queue(adapter,
                                                      adapter->rx_scrq[i]);
                        }
@@ -1276,6 +1269,29 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
        adapter->requested_caps = 0;
 }
 
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
+{
+       int i;
+
+       if (adapter->tx_scrq) {
+               for (i = 0; i < adapter->req_tx_queues; i++)
+                       if (adapter->tx_scrq[i])
+                               release_sub_crq_queue(adapter,
+                                                     adapter->tx_scrq[i]);
+               adapter->tx_scrq = NULL;
+       }
+
+       if (adapter->rx_scrq) {
+               for (i = 0; i < adapter->req_rx_queues; i++)
+                       if (adapter->rx_scrq[i])
+                               release_sub_crq_queue(adapter,
+                                                     adapter->rx_scrq[i]);
+               adapter->rx_scrq = NULL;
+       }
+
+       adapter->requested_caps = 0;
+}
+
 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
                            struct ibmvnic_sub_crq_queue *scrq)
 {
@@ -1395,6 +1411,66 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
        return IRQ_HANDLED;
 }
 
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
+{
+       struct device *dev = &adapter->vdev->dev;
+       struct ibmvnic_sub_crq_queue *scrq;
+       int i = 0, j = 0;
+       int rc = 0;
+
+       for (i = 0; i < adapter->req_tx_queues; i++) {
+               scrq = adapter->tx_scrq[i];
+               scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+               if (scrq->irq == NO_IRQ) {
+                       rc = -EINVAL;
+                       dev_err(dev, "Error mapping irq\n");
+                       goto req_tx_irq_failed;
+               }
+
+               rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
+                                0, "ibmvnic_tx", scrq);
+
+               if (rc) {
+                       dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
+                               scrq->irq, rc);
+                       irq_dispose_mapping(scrq->irq);
+                       goto req_tx_irq_failed;
+               }
+       }
+
+       for (i = 0; i < adapter->req_rx_queues; i++) {
+               scrq = adapter->rx_scrq[i];
+               scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+               if (scrq->irq == NO_IRQ) {
+                       rc = -EINVAL;
+                       dev_err(dev, "Error mapping irq\n");
+                       goto req_rx_irq_failed;
+               }
+               rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
+                                0, "ibmvnic_rx", scrq);
+               if (rc) {
+                       dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
+                               scrq->irq, rc);
+                       irq_dispose_mapping(scrq->irq);
+                       goto req_rx_irq_failed;
+               }
+       }
+       return rc;
+
+req_rx_irq_failed:
+       for (j = 0; j < i; j++) {
+               free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
+               irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+       }
+       i = adapter->req_tx_queues;
+req_tx_irq_failed:
+       for (j = 0; j < i; j++) {
+               free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+               irq_dispose_mapping(adapter->tx_scrq[j]->irq);
+       }
+       release_sub_crqs_no_irqs(adapter);
+       return rc;
+}
+
 static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 {
        struct device *dev = &adapter->vdev->dev;
@@ -1403,8 +1479,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
        union ibmvnic_crq crq;
        int total_queues;
        int more = 0;
-       int i, j;
-       int rc;
+       int i;
 
        if (!retry) {
                /* Sub-CRQ entries are 32 byte long */
@@ -1483,13 +1558,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
        for (i = 0; i < adapter->req_tx_queues; i++) {
                adapter->tx_scrq[i] = allqueues[i];
                adapter->tx_scrq[i]->pool_index = i;
-               rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
-                                0, "ibmvnic_tx", adapter->tx_scrq[i]);
-               if (rc) {
-                       dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
-                               adapter->tx_scrq[i]->irq, rc);
-                       goto req_tx_irq_failed;
-               }
        }
 
        adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
@@ -1500,13 +1568,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
        for (i = 0; i < adapter->req_rx_queues; i++) {
                adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
                adapter->rx_scrq[i]->scrq_num = i;
-               rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
-                                0, "ibmvnic_rx", adapter->rx_scrq[i]);
-               if (rc) {
-                       dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
-                               adapter->rx_scrq[i]->irq, rc);
-                       goto req_rx_irq_failed;
-               }
        }
 
        memset(&crq, 0, sizeof(crq));
@@ -1559,15 +1620,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 
        return;
 
-req_rx_irq_failed:
-       for (j = 0; j < i; j++)
-               free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
-       i = adapter->req_tx_queues;
-req_tx_irq_failed:
-       for (j = 0; j < i; j++)
-               free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
-       kfree(adapter->rx_scrq);
-       adapter->rx_scrq = NULL;
 rx_failed:
        kfree(adapter->tx_scrq);
        adapter->tx_scrq = NULL;
@@ -2121,7 +2173,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
                                  struct ibmvnic_adapter *adapter)
 {
        struct device *dev = &adapter->vdev->dev;
-       struct ibmvnic_error_buff *error_buff;
+       struct ibmvnic_error_buff *error_buff, *tmp;
        unsigned long flags;
        bool found = false;
        int i;
@@ -2133,7 +2185,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
        }
 
        spin_lock_irqsave(&adapter->error_list_lock, flags);
-       list_for_each_entry(error_buff, &adapter->errors, list)
+       list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
                if (error_buff->error_id == crq->request_error_rsp.error_id) {
                        found = true;
                        list_del(&error_buff->list);
@@ -2348,9 +2400,9 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
                         *req_value,
                         (long int)be32_to_cpu(crq->request_capability_rsp.
                                               number), name);
-               release_sub_crqs(adapter);
+               release_sub_crqs_no_irqs(adapter);
                *req_value = be32_to_cpu(crq->request_capability_rsp.number);
-               complete(&adapter->init_done);
+               init_sub_crqs(adapter, 1);
                return;
        default:
                dev_err(dev, "Error %d in request cap rsp\n",
@@ -2659,7 +2711,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
 
 out:
        if (atomic_read(&adapter->running_cap_queries) == 0)
-               complete(&adapter->init_done);
+               init_sub_crqs(adapter, 0);
                /* We're done querying the capabilities, initialize sub-crqs */
 }
 
@@ -3141,14 +3193,14 @@ static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
 
 static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
 {
-       struct ibmvnic_inflight_cmd *inflight_cmd;
+       struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
        struct device *dev = &adapter->vdev->dev;
-       struct ibmvnic_error_buff *error_buff;
+       struct ibmvnic_error_buff *error_buff, *tmp2;
        unsigned long flags;
        unsigned long flags2;
 
        spin_lock_irqsave(&adapter->inflight_lock, flags);
-       list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
+       list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
                switch (inflight_cmd->crq.generic.cmd) {
                case LOGIN:
                        dma_unmap_single(dev, adapter->login_buf_token,
@@ -3165,8 +3217,8 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
                        break;
                case REQUEST_ERROR_INFO:
                        spin_lock_irqsave(&adapter->error_list_lock, flags2);
-                       list_for_each_entry(error_buff, &adapter->errors,
-                                           list) {
+                       list_for_each_entry_safe(error_buff, tmp2,
+                                                &adapter->errors, list) {
                                dma_unmap_single(dev, error_buff->dma,
                                                 error_buff->len,
                                                 DMA_FROM_DEVICE);
@@ -3202,8 +3254,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
                        dev_info(dev, "Partner initialized\n");
                        /* Send back a response */
                        rc = ibmvnic_send_crq_init_complete(adapter);
-                       if (rc == 0)
-                               send_version_xchg(adapter);
+                       if (!rc)
+                               schedule_work(&adapter->vnic_crq_init);
                        else
                                dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
                        break;
@@ -3555,8 +3607,63 @@ static const struct file_operations ibmvnic_dump_ops = {
        .release        = single_release,
 };
 
+static void handle_crq_init_rsp(struct work_struct *work)
+{
+       struct ibmvnic_adapter *adapter = container_of(work,
+                                                      struct ibmvnic_adapter,
+                                                      vnic_crq_init);
+       struct device *dev = &adapter->vdev->dev;
+       struct net_device *netdev = adapter->netdev;
+       unsigned long timeout = msecs_to_jiffies(30000);
+       int rc;
+
+       send_version_xchg(adapter);
+       reinit_completion(&adapter->init_done);
+       if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+               dev_err(dev, "Passive init timeout\n");
+               goto task_failed;
+       }
+
+       do {
+               if (adapter->renegotiate) {
+                       adapter->renegotiate = false;
+                       release_sub_crqs_no_irqs(adapter);
+                       send_cap_queries(adapter);
+
+                       reinit_completion(&adapter->init_done);
+                       if (!wait_for_completion_timeout(&adapter->init_done,
+                                                        timeout)) {
+                               dev_err(dev, "Passive init timeout\n");
+                               goto task_failed;
+                       }
+               }
+       } while (adapter->renegotiate);
+       rc = init_sub_crq_irqs(adapter);
+
+       if (rc)
+               goto task_failed;
+
+       netdev->real_num_tx_queues = adapter->req_tx_queues;
+
+       rc = register_netdev(netdev);
+       if (rc) {
+               dev_err(dev,
+                       "failed to register netdev rc=%d\n", rc);
+               goto register_failed;
+       }
+       dev_info(dev, "ibmvnic registered\n");
+
+       return;
+
+register_failed:
+       release_sub_crqs(adapter);
+task_failed:
+       dev_err(dev, "Passive initialization was not successful\n");
+}
+
 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
+       unsigned long timeout = msecs_to_jiffies(30000);
        struct ibmvnic_adapter *adapter;
        struct net_device *netdev;
        unsigned char *mac_addr_p;
@@ -3593,6 +3700,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        netdev->ethtool_ops = &ibmvnic_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);
 
+       INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+
        spin_lock_init(&adapter->stats_lock);
 
        rc = ibmvnic_init_crq_queue(adapter);
@@ -3635,30 +3744,26 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        ibmvnic_send_crq_init(adapter);
 
        init_completion(&adapter->init_done);
-       wait_for_completion(&adapter->init_done);
+       if (!wait_for_completion_timeout(&adapter->init_done, timeout))
+               return 0;
 
        do {
-               adapter->renegotiate = false;
-
-               init_sub_crqs(adapter, 0);
-               reinit_completion(&adapter->init_done);
-               wait_for_completion(&adapter->init_done);
-
                if (adapter->renegotiate) {
-                       release_sub_crqs(adapter);
+                       adapter->renegotiate = false;
+                       release_sub_crqs_no_irqs(adapter);
                        send_cap_queries(adapter);
 
                        reinit_completion(&adapter->init_done);
-                       wait_for_completion(&adapter->init_done);
+                       if (!wait_for_completion_timeout(&adapter->init_done,
+                                                        timeout))
+                               return 0;
                }
        } while (adapter->renegotiate);
 
-       /* if init_sub_crqs is partially successful, retry */
-       while (!adapter->tx_scrq || !adapter->rx_scrq) {
-               init_sub_crqs(adapter, 1);
-
-               reinit_completion(&adapter->init_done);
-               wait_for_completion(&adapter->init_done);
+       rc = init_sub_crq_irqs(adapter);
+       if (rc) {
+               dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
+               goto free_debugfs;
        }
 
        netdev->real_num_tx_queues = adapter->req_tx_queues;
@@ -3666,12 +3771,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        rc = register_netdev(netdev);
        if (rc) {
                dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
-               goto free_debugfs;
+               goto free_sub_crqs;
        }
        dev_info(&dev->dev, "ibmvnic registered\n");
 
        return 0;
 
+free_sub_crqs:
+       release_sub_crqs(adapter);
 free_debugfs:
        if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
                debugfs_remove_recursive(adapter->debugfs_dir);
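
Several ibmvnic hunks above switch list walks to list_for_each_entry_safe() because entries are deleted (and freed) mid-traversal; the _safe variant caches the next pointer before the loop body runs. The same idea in a self-contained plain-C list (toy code, not the kernel list API):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int id; struct node *next; };

    /* Deletion-safe traversal: cache the next pointer before the
     * current entry can be freed.
     */
    static struct node *remove_id(struct node *head, int id)
    {
            struct node **pp = &head, *cur, *next;

            for (cur = head; cur; cur = next) {
                    next = cur->next;        /* saved before any free */
                    if (cur->id == id) {
                            *pp = next;
                            free(cur);
                            continue;
                    }
                    pp = &cur->next;
            }
            return head;
    }

    int main(void)
    {
            struct node *n, *head = NULL;

            for (int id = 3; id >= 1; id--) {
                    n = malloc(sizeof(*n));
                    n->id = id;
                    n->next = head;
                    head = n;
            }
            head = remove_id(head, 2);
            for (n = head; n; n = n->next)
                    printf("%d\n", n->id);   /* 1, 3 */
            while (head) { n = head->next; free(head); head = n; }
            return 0;
    }
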
index 0b66a506a4e44e4d36aae861c8ba6289318cd8e1..e82898fd518ef890afbbf79c307833b8127e782c 100644 (file)
@@ -1045,4 +1045,6 @@ struct ibmvnic_adapter {
        u64 opt_rxba_entries_per_subcrq;
        __be64 tx_rx_desc_req;
        u8 map_id;
+
+       struct work_struct vnic_crq_init;
 };
index 75e60897b7e748bca73317ae0e6d9247e6a9bbf9..2b2e2f8c636994219e997bdb53ff8d3379275bb9 100644 (file)
@@ -2789,7 +2789,7 @@ static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
 }
 
 /**
- * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
  * @adapter: board private structure to initialize
  **/
 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
@@ -6915,6 +6915,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
        if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
                features &= ~NETIF_F_RXFCS;
 
+       /* Since there is no support for separate Rx/Tx vlan accel
+        * enable/disable make sure Tx flag is always in same state as Rx.
+        */
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               features |= NETIF_F_HW_VLAN_CTAG_TX;
+       else
+               features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
        return features;
 }
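
The fix_features hook above keeps the VLAN CTAG TX flag locked to the RX flag because the hardware cannot enable stripping and insertion independently. A minimal model of that rule (flag values assumed for illustration):

    #include <stdio.h>

    #define F_VLAN_RX (1u << 0) /* illustrative flag values */
    #define F_VLAN_TX (1u << 1)

    /* The hardware handles VLAN tags as a pair, so TX must mirror RX
     * before the feature set is committed.
     */
    static unsigned int fix_features(unsigned int features)
    {
            if (features & F_VLAN_RX)
                    features |= F_VLAN_TX;
            else
                    features &= ~F_VLAN_TX;
            return features;
    }

    int main(void)
    {
            printf("%#x\n", fix_features(F_VLAN_RX)); /* 0x3: both on  */
            printf("%#x\n", fix_features(F_VLAN_TX)); /* 0:   both off */
            return 0;
    }
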
 
index 5ea22008d721d0a0ad769f32b7ee74ef8b19249d..501f15d9f4d6eef599733f12e7052fedc1563186 100644 (file)
@@ -1344,6 +1344,13 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
        if (!vsi || !macaddr)
                return NULL;
 
+       /* Do not allow a broadcast filter to be added here, since one is
+        * already added as part of VSI creation for any newly created
+        * VSI except the FDIR VSI
+        */
+       if (is_broadcast_ether_addr(macaddr))
+               return NULL;
+
        f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
@@ -2151,18 +2158,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                                         aq_ret, pf->hw.aq.asq_last_status);
                        }
                }
-               aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
-                                                  vsi->seid,
-                                                  cur_promisc, NULL);
-               if (aq_ret) {
-                       retval = i40e_aq_rc_to_posix(aq_ret,
-                                                    pf->hw.aq.asq_last_status);
-                       dev_info(&pf->pdev->dev,
-                                "set brdcast promisc failed, err %s, aq_err %s\n",
-                                i40e_stat_str(&pf->hw, aq_ret),
-                                i40e_aq_str(&pf->hw,
-                                            pf->hw.aq.asq_last_status));
-               }
        }
 out:
        /* if something went wrong then set the changed flag so we try again */
@@ -7726,10 +7721,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
  * @vsi: the VSI being configured
  * @v_idx: index of the vector in the vsi struct
+ * @cpu: cpu to be used on affinity_mask
  *
  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
  **/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
 {
        struct i40e_q_vector *q_vector;
 
@@ -7740,7 +7736,8 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 
        q_vector->vsi = vsi;
        q_vector->v_idx = v_idx;
-       cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+       cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+
        if (vsi->netdev)
                netif_napi_add(vsi->netdev, &q_vector->napi,
                               i40e_napi_poll, NAPI_POLL_WEIGHT);
@@ -7764,8 +7761,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
-       int v_idx, num_q_vectors;
-       int err;
+       int err, v_idx, num_q_vectors, current_cpu;
 
        /* if not MSIX, give the one vector only to the LAN VSI */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -7775,10 +7771,15 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
        else
                return -EINVAL;
 
+       current_cpu = cpumask_first(cpu_online_mask);
+
        for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-               err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+               err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
                if (err)
                        goto err_out;
+               current_cpu = cpumask_next(current_cpu, cpu_online_mask);
+               if (unlikely(current_cpu >= nr_cpu_ids))
+                       current_cpu = cpumask_first(cpu_online_mask);
        }
 
        return 0;
@@ -9224,6 +9225,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
 static int i40e_add_vsi(struct i40e_vsi *vsi)
 {
        int ret = -ENODEV;
+       i40e_status aq_ret = 0;
        u8 laa_macaddr[ETH_ALEN];
        bool found_laa_mac_filter = false;
        struct i40e_pf *pf = vsi->back;
@@ -9413,6 +9415,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                vsi->seid = ctxt.seid;
                vsi->id = ctxt.vsi_number;
        }
+       /* Except the FDIR VSI, set the broadcast filter for all other VSIs */
+       if (vsi->type != I40E_VSI_FDIR) {
+               aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+               if (aq_ret) {
+                       ret = i40e_aq_rc_to_posix(aq_ret,
+                                                 hw->aq.asq_last_status);
+                       dev_info(&pf->pdev->dev,
+                                "set brdcast promisc failed, err %s, aq_err %s\n",
+                                i40e_stat_str(hw, aq_ret),
+                                i40e_aq_str(hw, hw->aq.asq_last_status));
+               }
+       }
 
        spin_lock_bh(&vsi->mac_filter_list_lock);
        /* If macvlan filters already exist, force them to get loaded */
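
The q_vector change above spreads interrupt vectors across online CPUs round-robin instead of pinning vector N to CPU N, wrapping back to the first CPU when the online mask runs out. A plain-C model of the wrap logic (cpu count assumed):

    #include <stdio.h>

    /* Round-robin assignment over online CPUs; the wrap below plays the
     * role of cpumask_next() running past nr_cpu_ids in the kernel.
     */
    int main(void)
    {
            int nr_cpus = 4, num_vectors = 6, cpu = 0;

            for (int v = 0; v < num_vectors; v++) {
                    printf("vector %d -> cpu %d\n", v, cpu);
                    cpu = (cpu + 1) % nr_cpus;      /* wrap to first CPU */
            }
            return 0;
    }
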
index 55f151fca1dcb785089511105920755fef9397b1..a8868e1bf832557ffa7c68d2840a0b098fdf874d 100644 (file)
@@ -1280,8 +1280,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    union i40e_rx_desc *rx_desc)
 {
        struct i40e_rx_ptype_decoded decoded;
-       bool ipv4, ipv6, tunnel = false;
        u32 rx_error, rx_status;
+       bool ipv4, ipv6;
        u8 ptype;
        u64 qword;
 
@@ -1336,19 +1336,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;
 
-       /* The hardware supported by this driver does not validate outer
-        * checksums for tunneled VXLAN or GENEVE frames.  I don't agree
-        * with it but the specification states that you "MAY validate", it
-        * doesn't make it a hard requirement so if we have validated the
-        * inner checksum report CHECKSUM_UNNECESSARY.
+       /* If there is an outer header present that might contain a checksum,
+        * we need to bump the checksum level by 1 to reflect the fact that
+        * we are indicating we validated the inner checksum.
         */
-       if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
-                                 I40E_RX_PTYPE_INNER_PROT_UDP |
-                                 I40E_RX_PTYPE_INNER_PROT_SCTP))
-               tunnel = true;
-
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
-       skb->csum_level = tunnel ? 1 : 0;
+       if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+               skb->csum_level = 1;
+
+       /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+       switch (decoded.inner_prot) {
+       case I40E_RX_PTYPE_INNER_PROT_TCP:
+       case I40E_RX_PTYPE_INNER_PROT_UDP:
+       case I40E_RX_PTYPE_INNER_PROT_SCTP:
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               /* fall through */
+       default:
+               break;
+       }
 
        return;
 
index be99189da925fc5862e9c318489694341a3a68f9..79d99cd91b24317d0cc970a6bdedbb24abeac356 100644 (file)
@@ -752,8 +752,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    union i40e_rx_desc *rx_desc)
 {
        struct i40e_rx_ptype_decoded decoded;
-       bool ipv4, ipv6, tunnel = false;
        u32 rx_error, rx_status;
+       bool ipv4, ipv6;
        u8 ptype;
        u64 qword;
 
@@ -808,19 +808,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;
 
-       /* The hardware supported by this driver does not validate outer
-        * checksums for tunneled VXLAN or GENEVE frames.  I don't agree
-        * with it but the specification states that you "MAY validate", it
-        * doesn't make it a hard requirement so if we have validated the
-        * inner checksum report CHECKSUM_UNNECESSARY.
+       /* If there is an outer header present that might contain a checksum,
+        * we need to bump the checksum level by 1 to reflect the fact that
+        * we are indicating we validated the inner checksum.
         */
-       if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
-                                 I40E_RX_PTYPE_INNER_PROT_UDP |
-                                 I40E_RX_PTYPE_INNER_PROT_SCTP))
-               tunnel = true;
-
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
-       skb->csum_level = tunnel ? 1 : 0;
+       if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+               skb->csum_level = 1;
+
+       /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+       switch (decoded.inner_prot) {
+       case I40E_RX_PTYPE_INNER_PROT_TCP:
+       case I40E_RX_PTYPE_INNER_PROT_UDP:
+       case I40E_RX_PTYPE_INNER_PROT_SCTP:
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               /* fall through */
+       default:
+               break;
+       }
 
        return;
 
index 088c47cf27d97d0f5a8a40992c95ec7c7a761947..8bebd862a54ccd7f4f3defe4fd1a1035188294d6 100644 (file)
@@ -2887,7 +2887,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
 
-       return 0;
+       return min(work_done, budget - 1);
 }
 
 /**
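
The ixgbe_poll() fix above matters for any NAPI driver: once the handler completes and re-enables interrupts it must report strictly less than its budget, since returning the full budget tells the core there is more work and it keeps polling. A toy model of the contract:

    #include <stdio.h>

    /* A completing NAPI handler caps its return value at budget - 1. */
    static int poll_done(int work_done, int budget)
    {
            return work_done < budget - 1 ? work_done : budget - 1;
    }

    int main(void)
    {
            printf("%d\n", poll_done(64, 64)); /* 63, not 64 */
            printf("%d\n", poll_done(10, 64)); /* 10 */
            return 0;
    }
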
index 61a80da8b6f0dec4323f094a245fc3fcbb6bcab0..2819abc454c7e71c024ab280ec3a5a1780a07a4b 100644 (file)
@@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
 static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = -IXGBE_ERR_MBX;
+       s32 ret_val = IXGBE_ERR_MBX;
 
        if (!mbx->ops.read)
                goto out;
@@ -111,7 +111,7 @@ out:
 static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = -IXGBE_ERR_MBX;
+       s32 ret_val = IXGBE_ERR_MBX;
 
        /* exit if either we can't write or there isn't a defined timeout */
        if (!mbx->ops.write || !mbx->timeout)
index a6d26d351dfc47c777b39c04a44c2f17bca0feab..f92018b13d2869e57ec307029072e3b85ac5041a 100644 (file)
 /* Various constants */
 
 /* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS                1
+#define MVNETA_TXDONE_COAL_PKTS                0       /* interrupt per packet */
 #define MVNETA_RX_COAL_PKTS            32
 #define MVNETA_RX_COAL_USEC            100
 
@@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev)
        return 0;
 
 err_free_irq:
+       unregister_cpu_notifier(&pp->cpu_notifier);
+       on_each_cpu(mvneta_percpu_disable, pp, true);
        free_percpu_irq(pp->dev->irq, pp->ports);
 err_cleanup_txqs:
        mvneta_cleanup_txqs(pp);
index 4763252bbf8557ac98c73876e685d4b7413e3f43..d1cdc2d7615158f612885e100a0cc274c22a3fe9 100644 (file)
@@ -481,20 +481,23 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
 /* the qdma core needs scratch memory to be setup */
 static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
-       dma_addr_t phy_ring_head, phy_ring_tail;
+       dma_addr_t phy_ring_tail;
        int cnt = MTK_DMA_SIZE;
        dma_addr_t dma_addr;
        int i;
 
        eth->scratch_ring = dma_alloc_coherent(eth->dev,
                                               cnt * sizeof(struct mtk_tx_dma),
-                                              &phy_ring_head,
+                                              &eth->phy_scratch_ring,
                                               GFP_ATOMIC | __GFP_ZERO);
        if (unlikely(!eth->scratch_ring))
                return -ENOMEM;
 
        eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
                                    GFP_KERNEL);
+       if (unlikely(!eth->scratch_head))
+               return -ENOMEM;
+
        dma_addr = dma_map_single(eth->dev,
                                  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
                                  DMA_FROM_DEVICE);
@@ -502,19 +505,19 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
                return -ENOMEM;
 
        memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
-       phy_ring_tail = phy_ring_head +
+       phy_ring_tail = eth->phy_scratch_ring +
                        (sizeof(struct mtk_tx_dma) * (cnt - 1));
 
        for (i = 0; i < cnt; i++) {
                eth->scratch_ring[i].txd1 =
                                        (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
                if (i < cnt - 1)
-                       eth->scratch_ring[i].txd2 = (phy_ring_head +
+                       eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
                                ((i + 1) * sizeof(struct mtk_tx_dma)));
                eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
        }
 
-       mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+       mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
        mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
        mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
        mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
@@ -671,7 +674,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 err_dma:
        do {
-               tx_buf = mtk_desc_to_tx_buf(ring, txd);
+               tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
                /* unmap dma */
                mtk_tx_unmap(&dev->dev, tx_buf);
@@ -701,6 +704,20 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
        return nfrags;
 }
 
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+       int i;
+
+       for (i = 0; i < MTK_MAC_COUNT; i++) {
+               if (!eth->netdev[i])
+                       continue;
+               if (netif_queue_stopped(eth->netdev[i]))
+                       return 1;
+       }
+
+       return 0;
+}
+
 static void mtk_wake_queue(struct mtk_eth *eth)
 {
        int i;
@@ -766,12 +783,9 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
                goto drop;
 
-       if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
+       if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
                mtk_stop_queue(eth);
-               if (unlikely(atomic_read(&ring->free_count) >
-                            ring->thresh))
-                       mtk_wake_queue(eth);
-       }
+
        spin_unlock_irqrestore(&eth->page_lock, flags);
 
        return NETDEV_TX_OK;
@@ -826,6 +840,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                                          DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
                        skb_free_frag(new_data);
+                       netdev->stats.rx_dropped++;
                        goto release_desc;
                }
 
@@ -833,6 +848,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                skb = build_skb(data, ring->frag_size);
                if (unlikely(!skb)) {
                        put_page(virt_to_head_page(new_data));
+                       netdev->stats.rx_dropped++;
                        goto release_desc;
                }
                skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
@@ -921,7 +937,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
                }
                mtk_tx_unmap(eth->dev, tx_buf);
 
-               ring->last_free->txd2 = next_cpu;
                ring->last_free = desc;
                atomic_inc(&ring->free_count);
 
@@ -946,7 +961,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
        if (!total)
                return 0;
 
-       if (atomic_read(&ring->free_count) > ring->thresh)
+       if (mtk_queue_stopped(eth) &&
+           (atomic_read(&ring->free_count) > ring->thresh))
                mtk_wake_queue(eth);
 
        return total;
@@ -1027,9 +1043,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 
        atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
        ring->next_free = &ring->dma[0];
-       ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
-       ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
-                             MAX_SKB_FRAGS);
+       ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+       ring->thresh = MAX_SKB_FRAGS;
 
        /* make sure that all changes to the dma ring are flushed before we
         * continue
@@ -1207,6 +1222,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
        for (i = 0; i < MTK_MAC_COUNT; i++)
                if (eth->netdev[i])
                        netdev_reset_queue(eth->netdev[i]);
+       if (eth->scratch_ring) {
+               dma_free_coherent(eth->dev,
+                                 MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+                                 eth->scratch_ring,
+                                 eth->phy_scratch_ring);
+               eth->scratch_ring = NULL;
+               eth->phy_scratch_ring = 0;
+       }
        mtk_tx_clean(eth);
        mtk_rx_clean(eth);
        kfree(eth->scratch_head);
@@ -1269,7 +1292,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
        mtk_w32(eth,
                MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
                MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
-               MTK_RX_BT_32DWORDS,
+               MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
                MTK_QDMA_GLO_CFG);
 
        return 0;
@@ -1383,7 +1406,7 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
 
        /* disable delay and normal interrupt */
        mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-       mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+       mtk_irq_disable(eth, ~0);
        mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
        mtk_w32(eth, 0, MTK_RST_GL);
 
@@ -1697,7 +1720,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
        mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
        SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-       eth->netdev[id]->watchdog_timeo = HZ;
+       eth->netdev[id]->watchdog_timeo = 5 * HZ;
        eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
        eth->netdev[id]->base_addr = (unsigned long)eth->base;
        eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
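
The mtk scratch-ring fix above stores the bus address returned by dma_alloc_coherent() in the device struct so mtk_dma_free() can hand it back to dma_free_coherent(); freeing a coherent region needs both the CPU pointer and the DMA handle. A userspace model of the bookkeeping (malloc stands in for the coherent allocator):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Keep the bus address next to the CPU pointer: freeing needs both. */
    struct eth {
            void      *scratch_ring;     /* CPU address */
            uintptr_t  phy_scratch_ring; /* bus address */
    };

    static int init_fq(struct eth *eth, size_t size)
    {
            eth->scratch_ring = malloc(size);
            if (!eth->scratch_ring)
                    return -1;
            /* a real driver records the dma_addr_t out-parameter here */
            eth->phy_scratch_ring = (uintptr_t)eth->scratch_ring;
            return 0;
    }

    static void dma_free(struct eth *eth)
    {
            if (eth->scratch_ring) {
                    free(eth->scratch_ring); /* would take phy_scratch_ring */
                    eth->scratch_ring = NULL;
                    eth->phy_scratch_ring = 0;
            }
    }

    int main(void)
    {
            struct eth eth = { 0 };

            if (!init_fq(&eth, 4096))
                    dma_free(&eth);
            printf("ring after free: %p\n", eth.scratch_ring);
            return 0;
    }
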
index eed626d56ea4f1d3df5f2ed3aacbd8bded28cead..a5eb7c62306b66116df10a75df115fb4810534d9 100644 (file)
@@ -91,6 +91,7 @@
 #define MTK_QDMA_GLO_CFG       0x1A04
 #define MTK_RX_2B_OFFSET       BIT(31)
 #define MTK_RX_BT_32DWORDS     (3 << 11)
+#define MTK_NDP_CO_PRO         BIT(10)
 #define MTK_TX_WB_DDONE                BIT(6)
 #define MTK_DMA_SIZE_16DWORDS  (2 << 4)
 #define MTK_RX_DMA_BUSY                BIT(3)
@@ -357,6 +358,7 @@ struct mtk_rx_ring {
  * @rx_ring:           Pointer to the memore holding info about the RX ring
  * @rx_napi:           The NAPI struct
  * @scratch_ring:      Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring:  physical address of scratch_ring
  * @scratch_head:      The scratch memory that scratch_ring points to.
  * @clk_ethif:         The ethif clock
  * @clk_esw:           The switch clock
@@ -384,6 +386,7 @@ struct mtk_eth {
        struct mtk_rx_ring              rx_ring;
        struct napi_struct              rx_napi;
        struct mtk_tx_dma               *scratch_ring;
+       dma_addr_t                      phy_scratch_ring;
        void                            *scratch_head;
        struct clk                      *clk_ethif;
        struct clk                      *clk_esw;
index e94ca1c3fc7c6a83a190a29d4116086dcc9de8ca..f04a423ff79dd977c081300822a767173fd3c57e 100644 (file)
@@ -2597,7 +2597,6 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
        priv->cmd.free_head = 0;
 
        sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
-       spin_lock_init(&priv->cmd.context_lock);
 
        for (priv->cmd.token_mask = 1;
             priv->cmd.token_mask < priv->cmd.max_cmds;
index fc95affaf76b5d7b36d3c745e7fe271c8442cbb8..44cf16d01f4275501870e3711fa27e5d8ec43a0e 100644 (file)
@@ -1042,6 +1042,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_port_profile new_prof;
+       struct mlx4_en_priv *tmp;
        u32 rx_size, tx_size;
        int port_up = 0;
        int err = 0;
@@ -1061,22 +1063,25 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
            tx_size == priv->tx_ring[0]->size)
                return 0;
 
+       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
        mutex_lock(&mdev->state_lock);
+       memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+       new_prof.tx_ring_size = tx_size;
+       new_prof.rx_ring_size = rx_size;
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       if (err)
+               goto out;
+
        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev, 1);
        }
 
-       mlx4_en_free_resources(priv);
-
-       priv->prof->tx_ring_size = tx_size;
-       priv->prof->rx_ring_size = rx_size;
+       mlx4_en_safe_replace_resources(priv, tmp);
 
-       err = mlx4_en_alloc_resources(priv);
-       if (err) {
-               en_err(priv, "Failed reallocating port resources\n");
-               goto out;
-       }
        if (port_up) {
                err = mlx4_en_start_port(dev);
                if (err)
@@ -1084,8 +1089,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
        }
 
        err = mlx4_en_moderation_update(priv);
-
 out:
+       kfree(tmp);
        mutex_unlock(&mdev->state_lock);
        return err;
 }
@@ -1714,6 +1719,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_port_profile new_prof;
+       struct mlx4_en_priv *tmp;
        int port_up = 0;
        int err = 0;
 
@@ -1723,23 +1730,26 @@ static int mlx4_en_set_channels(struct net_device *dev,
            !channel->tx_count || !channel->rx_count)
                return -EINVAL;
 
+       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
        mutex_lock(&mdev->state_lock);
+       memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+       new_prof.num_tx_rings_p_up = channel->tx_count;
+       new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+       new_prof.rx_ring_num = channel->rx_count;
+
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       if (err)
+               goto out;
+
        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev, 1);
        }
 
-       mlx4_en_free_resources(priv);
-
-       priv->num_tx_rings_p_up = channel->tx_count;
-       priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
-       priv->rx_ring_num = channel->rx_count;
-
-       err = mlx4_en_alloc_resources(priv);
-       if (err) {
-               en_err(priv, "Failed reallocating port resources\n");
-               goto out;
-       }
+       mlx4_en_safe_replace_resources(priv, tmp);
 
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -1757,8 +1767,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
        }
 
        err = mlx4_en_moderation_update(priv);
-
 out:
+       kfree(tmp);
        mutex_unlock(&mdev->state_lock);
        return err;
 }
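
Both ethtool paths above now stage the new configuration in a temporary priv, attempt the allocation while the port is still serving traffic, and only after success stop the port and swap with mlx4_en_safe_replace_resources(); on -ENOMEM the old rings stay in place. A minimal sketch of that allocate-before-free swap (plain C, illustrative names):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct rings {
            int    *buf;
            size_t  size;
    };

    /* Allocate the replacement first; the old buffer is released only
     * once the new one is known to exist, so failure leaves the
     * caller's configuration untouched. */
    static int resize_rings(struct rings *r, size_t new_size)
    {
            int *tmp = calloc(new_size, sizeof(*tmp));

            if (!tmp)
                    return -ENOMEM; /* old rings still valid */

            free(r->buf);           /* the "stop port and replace" step */
            r->buf = tmp;
            r->size = new_size;
            return 0;
    }

    int main(void)
    {
            struct rings r = { calloc(64, sizeof(int)), 64 };

            if (resize_rings(&r, 1024) == 0)
                    printf("resized to %zu entries\n", r.size);
            free(r.buf);
            return 0;
    }
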
index 19ceced6736c600798b8ce50f70bb3c9072b05bd..8359e9e51b3b4c99d95238575fbe0285b61fa571 100644 (file)
@@ -406,14 +406,18 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
        mutex_lock(&mdev->state_lock);
        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
-               if (err)
+               if (err) {
                        en_err(priv, "Failed configuring VLAN filter\n");
+                       goto out;
+               }
        }
-       if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
-               en_dbg(HW, priv, "failed adding vlan %d\n", vid);
-       mutex_unlock(&mdev->state_lock);
+       err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
+       if (err)
+               en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
 
-       return 0;
+out:
+       mutex_unlock(&mdev->state_lock);
+       return err;
 }
 
 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
@@ -421,7 +425,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
-       int err;
+       int err = 0;
 
        en_dbg(HW, priv, "Killing VID:%d\n", vid);
 
@@ -438,7 +442,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
        }
        mutex_unlock(&mdev->state_lock);
 
-       return 0;
+       return err;
 }
 
 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
@@ -1950,7 +1954,7 @@ static int mlx4_en_close(struct net_device *dev)
        return 0;
 }
 
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 {
        int i;
 
@@ -1975,7 +1979,7 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 
 }
 
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
        struct mlx4_en_port_profile *prof = priv->prof;
        int i;
@@ -2032,11 +2036,91 @@ err:
        return -ENOMEM;
 }
 
+static void mlx4_en_shutdown(struct net_device *dev)
+{
+       rtnl_lock();
+       netif_device_detach(dev);
+       mlx4_en_close(dev);
+       rtnl_unlock();
+}
+
+static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
+                            struct mlx4_en_priv *src,
+                            struct mlx4_en_port_profile *prof)
+{
+       memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
+              sizeof(dst->hwtstamp_config));
+       dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
+       dst->tx_ring_num = prof->tx_ring_num;
+       dst->rx_ring_num = prof->rx_ring_num;
+       dst->flags = prof->flags;
+       dst->mdev = src->mdev;
+       dst->port = src->port;
+       dst->dev = src->dev;
+       dst->prof = prof;
+       dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+                                        DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+
+       dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
+                               GFP_KERNEL);
+       if (!dst->tx_ring)
+               return -ENOMEM;
+
+       dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
+                             GFP_KERNEL);
+       if (!dst->tx_cq) {
+               kfree(dst->tx_ring);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
+                               struct mlx4_en_priv *src)
+{
+       memcpy(dst->rx_ring, src->rx_ring,
+              sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
+       memcpy(dst->rx_cq, src->rx_cq,
+              sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
+       memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
+              sizeof(dst->hwtstamp_config));
+       dst->tx_ring_num = src->tx_ring_num;
+       dst->rx_ring_num = src->rx_ring_num;
+       dst->tx_ring = src->tx_ring;
+       dst->tx_cq = src->tx_cq;
+       memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
+}
+
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+                               struct mlx4_en_priv *tmp,
+                               struct mlx4_en_port_profile *prof)
+{
+       mlx4_en_copy_priv(tmp, priv, prof);
+
+       if (mlx4_en_alloc_resources(tmp)) {
+               en_warn(priv,
+                       "%s: Resource allocation failed, using previous configuration\n",
+                       __func__);
+               kfree(tmp->tx_ring);
+               kfree(tmp->tx_cq);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+                                   struct mlx4_en_priv *tmp)
+{
+       mlx4_en_free_resources(priv);
+       mlx4_en_update_priv(priv, tmp);
+}
 
 void mlx4_en_destroy_netdev(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       bool shutdown = mdev->dev->persist->interface_state &
+                                           MLX4_INTERFACE_STATE_SHUTDOWN;
 
        en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
@@ -2044,7 +2128,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
        if (priv->registered) {
                devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
                                                              priv->port));
-               unregister_netdev(dev);
+               if (shutdown)
+                       mlx4_en_shutdown(dev);
+               else
+                       unregister_netdev(dev);
        }
 
        if (priv->allocated)
@@ -2064,12 +2151,17 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
        mdev->upper[priv->port] = NULL;
        mutex_unlock(&mdev->state_lock);
 
+#ifdef CONFIG_RFS_ACCEL
+       mlx4_en_cleanup_filters(priv);
+#endif
+
        mlx4_en_free_resources(priv);
 
        kfree(priv->tx_ring);
        kfree(priv->tx_cq);
 
-       free_netdev(dev);
+       if (!shutdown)
+               free_netdev(dev);
 }
 
 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
@@ -2447,9 +2539,14 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
         * strip that feature if this is an IPv6 encapsulated frame.
         */
        if (skb->encapsulation &&
-           (skb->ip_summed == CHECKSUM_PARTIAL) &&
-           (ip_hdr(skb)->version != 4))
-               features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+           (skb->ip_summed == CHECKSUM_PARTIAL)) {
+               struct mlx4_en_priv *priv = netdev_priv(dev);
+
+               if (!priv->vxlan_port ||
+                   (ip_hdr(skb)->version != 4) ||
+                   (udp_hdr(skb)->dest != priv->vxlan_port))
+                       features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+       }
 
        return features;
 }
@@ -3102,6 +3199,8 @@ int mlx4_en_reset_config(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_port_profile new_prof;
+       struct mlx4_en_priv *tmp;
        int port_up = 0;
        int err = 0;
 
@@ -3118,19 +3217,29 @@ int mlx4_en_reset_config(struct net_device *dev,
                return -EINVAL;
        }
 
+       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
        mutex_lock(&mdev->state_lock);
+
+       memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+       memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       if (err)
+               goto out;
+
        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev, 1);
        }
 
-       mlx4_en_free_resources(priv);
-
        en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
-               ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+               ts_config.rx_filter,
+               !!(features & NETIF_F_HW_VLAN_CTAG_RX));
 
-       priv->hwtstamp_config.tx_type = ts_config.tx_type;
-       priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+       mlx4_en_safe_replace_resources(priv, tmp);
 
        if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
                if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -3164,11 +3273,6 @@ int mlx4_en_reset_config(struct net_device *dev,
                dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
        }
 
-       err = mlx4_en_alloc_resources(priv);
-       if (err) {
-               en_err(priv, "Failed reallocating port resources\n");
-               goto out;
-       }
        if (port_up) {
                err = mlx4_en_start_port(dev);
                if (err)
@@ -3177,6 +3281,8 @@ int mlx4_en_reset_config(struct net_device *dev,
 
 out:
        mutex_unlock(&mdev->state_lock);
-       netdev_features_change(dev);
+       kfree(tmp);
+       if (!err)
+               netdev_features_change(dev);
        return err;
 }
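
The VLAN hunks in this file stop discarding return codes: failures from the filter update and from mlx4_register_vlan() now reach the caller, and every exit funnels through a single unlock. A user-space rendering of that goto-out error-propagation shape, with hypothetical helpers in place of the firmware calls:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static int port_up = 1;

    /* Stand-ins for mlx4_SET_VLAN_FLTR() and mlx4_register_vlan(). */
    static int set_vlan_filter(int vid) { return vid > 4095 ? -EINVAL : 0; }
    static int register_vlan(int vid)   { (void)vid; return 0; }

    /* One unlock site, first failure wins -- the shape of
     * mlx4_en_vlan_rx_add_vid() after the hunk above. */
    static int vlan_add(int vid)
    {
            int err = 0;

            pthread_mutex_lock(&state_lock);
            if (port_up) {
                    err = set_vlan_filter(vid);
                    if (err)
                            goto out;
            }
            err = register_vlan(vid);
    out:
            pthread_mutex_unlock(&state_lock);
            return err;
    }

    int main(void)
    {
            printf("add 100  -> %d\n", vlan_add(100));
            printf("add 9999 -> %d\n", vlan_add(9999));
            return 0;
    }
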
index c1b3a9c8cf3b4db9412e722e09983e72c64e3caa..99b5407f2278c0f292961cf8b458e4b4dfa584e7 100644 (file)
@@ -514,9 +514,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
        ring->rx_info = NULL;
        kfree(ring);
        *pring = NULL;
-#ifdef CONFIG_RFS_ACCEL
-       mlx4_en_cleanup_filters(priv);
-#endif
 }
 
 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
index 12c77a70abdb451c475680c05bb2b7cf86436cdf..546fab0ecc3bb5e8f8cbaa4fe401dcca1ca070d9 100644 (file)
@@ -3222,6 +3222,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 
        INIT_LIST_HEAD(&priv->pgdir_list);
        mutex_init(&priv->pgdir_mutex);
+       spin_lock_init(&priv->cmd.context_lock);
 
        INIT_LIST_HEAD(&priv->bf_list);
        mutex_init(&priv->bf_mutex);
@@ -4134,8 +4135,11 @@ static void mlx4_shutdown(struct pci_dev *pdev)
 
        mlx4_info(persist->dev, "mlx4_shutdown was called\n");
        mutex_lock(&persist->interface_state_mutex);
-       if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+       if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
+               /* Notify mlx4 clients that the kernel is being shut down */
+               persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
                mlx4_unload_one(pdev);
+       }
        mutex_unlock(&persist->interface_state_mutex);
 }
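
mlx4_shutdown() publishes MLX4_INTERFACE_STATE_SHUTDOWN under the interface-state mutex before unloading, and the en_netdev.c hunks read that flag so the reboot path detaches the interface instead of doing a full unregister_netdev()/free_netdev(). A toy version of flag-then-branch teardown (flag names from the patch, everything else illustrative):

    #include <pthread.h>
    #include <stdio.h>

    #define STATE_UP        (1 << 0)
    #define STATE_SHUTDOWN  (1 << 1)

    static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int interface_state = STATE_UP;

    static void shutdown_device(void)
    {
            pthread_mutex_lock(&state_mutex);
            if (interface_state & STATE_UP) {
                    /* Publish the reason for the teardown first, then
                     * unload: destroy paths can now tell a kernel
                     * shutdown from an ordinary driver removal. */
                    interface_state |= STATE_SHUTDOWN;
            }
            pthread_mutex_unlock(&state_mutex);
    }

    static void destroy_netdev(void)
    {
            /* Like the driver, sample the flag without the mutex:
             * by this point nothing sets it concurrently. */
            if (interface_state & STATE_SHUTDOWN)
                    printf("shutdown: detach only, skip free_netdev\n");
            else
                    printf("removal: full unregister + free\n");
    }

    int main(void)
    {
            shutdown_device();
            destroy_netdev();
            return 0;
    }
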
 
index 467d47ed2c394093ca8be3fa0ce1f1a26dc1a038..13d297ee34bb3f7fa195891e3959ce62605e580b 100644 (file)
@@ -353,12 +353,14 @@ struct mlx4_en_port_profile {
        u32 rx_ring_num;
        u32 tx_ring_size;
        u32 rx_ring_size;
+       u8 num_tx_rings_p_up;
        u8 rx_pause;
        u8 rx_ppp;
        u8 tx_pause;
        u8 tx_ppp;
        int rss_rings;
        int inline_thold;
+       struct hwtstamp_config hwtstamp_config;
 };
 
 struct mlx4_en_profile {
@@ -623,8 +625,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
                              u8 rx_ppp, u8 rx_pause,
                              u8 tx_ppp, u8 tx_pause);
 
-void mlx4_en_free_resources(struct mlx4_en_priv *priv);
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+                               struct mlx4_en_priv *tmp,
+                               struct mlx4_en_port_profile *prof);
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+                                   struct mlx4_en_priv *tmp);
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
                      int entries, int ring, enum cq_type mode, int node);
index dcd2df6518de32a8e412b10a548c0973caa15568..d6e2a1cae19ae2d6d636f1d306bca8e607106095 100644 (file)
@@ -295,6 +295,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
        case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
+       case MLX5_CMD_OP_2ERR_QP:
+       case MLX5_CMD_OP_2RST_QP:
+       case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+       case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+       case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+       case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
                return MLX5_CMD_STAT_OK;
 
        case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -321,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_RTR2RTS_QP:
        case MLX5_CMD_OP_RTS2RTS_QP:
        case MLX5_CMD_OP_SQERR2RTS_QP:
-       case MLX5_CMD_OP_2ERR_QP:
-       case MLX5_CMD_OP_2RST_QP:
        case MLX5_CMD_OP_QUERY_QP:
        case MLX5_CMD_OP_SQD_RTS_QP:
        case MLX5_CMD_OP_INIT2INIT_QP:
@@ -342,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
-       case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
        case MLX5_CMD_OP_SET_ROCE_ADDRESS:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
@@ -390,11 +393,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_CREATE_RQT:
        case MLX5_CMD_OP_MODIFY_RQT:
        case MLX5_CMD_OP_QUERY_RQT:
+
        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE:
        case MLX5_CMD_OP_CREATE_FLOW_GROUP:
        case MLX5_CMD_OP_QUERY_FLOW_GROUP:
-       case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+
        case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
        case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
@@ -545,6 +549,7 @@ const char *mlx5_command_str(int command)
        MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
+       MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
        default: return "unknown command opcode";
        }
 }
@@ -601,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev,
                pr_debug("\n");
 }
 
+static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+{
+       struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
+
+       return be16_to_cpu(hdr->opcode);
+}
+
+static void cb_timeout_handler(struct work_struct *work)
+{
+       struct delayed_work *dwork = container_of(work, struct delayed_work,
+                                                 work);
+       struct mlx5_cmd_work_ent *ent = container_of(dwork,
+                                                    struct mlx5_cmd_work_ent,
+                                                    cb_timeout_work);
+       struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
+                                                cmd);
+
+       ent->ret = -ETIMEDOUT;
+       mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+                      mlx5_command_str(msg_to_opcode(ent->in)),
+                      msg_to_opcode(ent->in));
+       mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+}
+
 static void cmd_work_handler(struct work_struct *work)
 {
        struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
        struct mlx5_cmd *cmd = ent->cmd;
        struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
+       unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
        struct mlx5_cmd_layout *lay;
        struct semaphore *sem;
        unsigned long flags;
@@ -646,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work)
        dump_command(dev, ent, 1);
        ent->ts1 = ktime_get_ns();
 
+       if (ent->callback)
+               schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+
        /* ring doorbell after the descriptor is valid */
        mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
        wmb();
@@ -690,13 +723,6 @@ static const char *deliv_status_to_str(u8 status)
        }
 }
 
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
-       struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
-       return be16_to_cpu(hdr->opcode);
-}
-
 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 {
        unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@@ -705,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 
        if (cmd->mode == CMD_MODE_POLLING) {
                wait_for_completion(&ent->done);
-               err = ent->ret;
-       } else {
-               if (!wait_for_completion_timeout(&ent->done, timeout))
-                       err = -ETIMEDOUT;
-               else
-                       err = 0;
+       } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
+               ent->ret = -ETIMEDOUT;
+               mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
        }
+
+       err = ent->ret;
+
        if (err == -ETIMEDOUT) {
                mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                               mlx5_command_str(msg_to_opcode(ent->in)),
@@ -760,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
        if (!callback)
                init_completion(&ent->done);
 
+       INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
        INIT_WORK(&ent->work, cmd_work_handler);
        if (page_queue) {
                cmd_work_handler(&ent->work);
@@ -769,28 +796,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
                goto out_free;
        }
 
-       if (!callback) {
-               err = wait_func(dev, ent);
-               if (err == -ETIMEDOUT)
-                       goto out;
-
-               ds = ent->ts2 - ent->ts1;
-               op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
-               if (op < ARRAY_SIZE(cmd->stats)) {
-                       stats = &cmd->stats[op];
-                       spin_lock_irq(&stats->lock);
-                       stats->sum += ds;
-                       ++stats->n;
-                       spin_unlock_irq(&stats->lock);
-               }
-               mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
-                                  "fw exec time for %s is %lld nsec\n",
-                                  mlx5_command_str(op), ds);
-               *status = ent->status;
-               free_cmd(ent);
-       }
+       if (callback)
+               goto out;
 
-       return err;
+       err = wait_func(dev, ent);
+       if (err == -ETIMEDOUT)
+               goto out_free;
+
+       ds = ent->ts2 - ent->ts1;
+       op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+       if (op < ARRAY_SIZE(cmd->stats)) {
+               stats = &cmd->stats[op];
+               spin_lock_irq(&stats->lock);
+               stats->sum += ds;
+               ++stats->n;
+               spin_unlock_irq(&stats->lock);
+       }
+       mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+                          "fw exec time for %s is %lld nsec\n",
+                          mlx5_command_str(op), ds);
+       *status = ent->status;
 
 out_free:
        free_cmd(ent);
@@ -1180,41 +1205,30 @@ err_dbg:
        return err;
 }
 
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
 {
        struct mlx5_cmd *cmd = &dev->cmd;
        int i;
 
        for (i = 0; i < cmd->max_reg_cmds; i++)
                down(&cmd->sem);
-
        down(&cmd->pages_sem);
 
-       flush_workqueue(cmd->wq);
-
-       cmd->mode = CMD_MODE_EVENTS;
+       cmd->mode = mode;
 
        up(&cmd->pages_sem);
        for (i = 0; i < cmd->max_reg_cmds; i++)
                up(&cmd->sem);
 }
 
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
 {
-       struct mlx5_cmd *cmd = &dev->cmd;
-       int i;
-
-       for (i = 0; i < cmd->max_reg_cmds; i++)
-               down(&cmd->sem);
-
-       down(&cmd->pages_sem);
-
-       flush_workqueue(cmd->wq);
-       cmd->mode = CMD_MODE_POLLING;
+       mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
+}
 
-       up(&cmd->pages_sem);
-       for (i = 0; i < cmd->max_reg_cmds; i++)
-               up(&cmd->sem);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+       mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
 }
 
 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@@ -1250,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
                        struct semaphore *sem;
 
                        ent = cmd->ent_arr[i];
+                       if (ent->callback)
+                               cancel_delayed_work(&ent->cb_timeout_work);
                        if (ent->page_queue)
                                sem = &cmd->pages_sem;
                        else
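
The command-interface hunks above make timeouts self-recording: wait_func() now stores -ETIMEDOUT in the entry and force-completes it through mlx5_cmd_comp_handler(), and asynchronous commands arm cb_timeout_work, cancelled again on normal completion. A rough pthreads analogue of the synchronous half -- a timed wait that marks the request itself on expiry (all names hypothetical):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    struct request {
            pthread_mutex_t lock;
            pthread_cond_t  done;
            int             completed;
            int             ret;
    };

    static int wait_request(struct request *req, int timeout_ms)
    {
            struct timespec ts;

            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec  += timeout_ms / 1000;
            ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
            if (ts.tv_nsec >= 1000000000L) {
                    ts.tv_sec++;
                    ts.tv_nsec -= 1000000000L;
            }

            pthread_mutex_lock(&req->lock);
            while (!req->completed) {
                    if (pthread_cond_timedwait(&req->done, &req->lock,
                                               &ts) == ETIMEDOUT) {
                            req->ret = -ETIMEDOUT; /* record in the entry */
                            req->completed = 1;    /* force-complete it */
                            break;
                    }
            }
            pthread_mutex_unlock(&req->lock);
            return req->ret;
    }

    int main(void)
    {
            struct request req = { PTHREAD_MUTEX_INITIALIZER,
                                   PTHREAD_COND_INITIALIZER, 0, 0 };

            /* Nobody ever signals, so this returns -ETIMEDOUT after
             * 100 ms instead of hanging forever. */
            printf("ret = %d\n", wait_request(&req, 100));
            return 0;
    }
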
index e8a6c3325b396c6295db60c833309c4e4102e780..943b1bd434bf50cf6ef26b408035f3af4ea6c38a 100644 (file)
@@ -145,7 +145,6 @@ struct mlx5e_umr_wqe {
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#define MLX5E_MIN_BW_ALLOC 1   /* Min percentage of BW allocation */
 #endif
 
 struct mlx5e_params {
@@ -191,6 +190,7 @@ struct mlx5e_tstamp {
 enum {
        MLX5E_RQ_STATE_POST_WQES_ENABLE,
        MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
+       MLX5E_RQ_STATE_FLUSH_TIMEOUT,
 };
 
 struct mlx5e_cq {
@@ -220,6 +220,8 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
 typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
                                  u16 ix);
 
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+
 struct mlx5e_dma_info {
        struct page     *page;
        dma_addr_t      addr;
@@ -241,6 +243,7 @@ struct mlx5e_rq {
        struct mlx5e_cq        cq;
        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
        mlx5e_fp_alloc_wqe     alloc_wqe;
+       mlx5e_fp_dealloc_wqe   dealloc_wqe;
 
        unsigned long          state;
        int                    ix;
@@ -305,6 +308,7 @@ struct mlx5e_sq_dma {
 enum {
        MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
        MLX5E_SQ_STATE_BF_ENABLE,
+       MLX5E_SQ_STATE_TX_TIMEOUT,
 };
 
 struct mlx5e_ico_wqe_info {
@@ -401,7 +405,7 @@ enum mlx5e_traffic_types {
 };
 
 enum {
-       MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+       MLX5E_STATE_ASYNC_EVENTS_ENABLED,
        MLX5E_STATE_OPENED,
        MLX5E_STATE_DESTROYING,
 };
@@ -538,6 +542,7 @@ struct mlx5e_priv {
        struct workqueue_struct    *wq;
        struct work_struct         update_carrier_work;
        struct work_struct         set_rx_mode_work;
+       struct work_struct         tx_timeout_work;
        struct delayed_work        update_stats_work;
 
        struct mlx5_core_dev      *mdev;
@@ -589,12 +594,16 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
 
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
 int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
 int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
 void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
                                    struct mlx5_cqe64 *cqe,
index b2db180ae2a5bbdda29219d63c72feea0958c12c..c585349e05c38ed26b49898d23be1ef83fa721a6 100644 (file)
@@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
                        tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
-                       tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
+                       tc_tx_bw[i] = ets->tc_tx_bw[i];
                        break;
                }
        }
@@ -140,8 +140,12 @@ static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
 
        /* Validate Bandwidth Sum */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+                       if (!ets->tc_tx_bw[i])
+                               return -EINVAL;
+
                        bw_sum += ets->tc_tx_bw[i];
+               }
        }
 
        if (bw_sum != 0 && bw_sum != 100)
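
The DCB hunks drop the silent 1% floor (MLX5E_MIN_BW_ALLOC) for a zero ETS share and instead have the validator reject the configuration outright. A compact sketch of that validate-don't-mangle approach (sizes and names illustrative):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_TCS 8
    enum { TSA_STRICT, TSA_ETS };

    /* Refuse a zero share on any ETS traffic class rather than quietly
     * bumping it to a minimum the user never asked for. */
    static int validate_ets(const int tsa[MAX_TCS], const int bw[MAX_TCS])
    {
            int i, sum = 0;

            for (i = 0; i < MAX_TCS; i++) {
                    if (tsa[i] == TSA_ETS) {
                            if (!bw[i])
                                    return -EINVAL;
                            sum += bw[i];
                    }
            }
            return (sum == 0 || sum == 100) ? 0 : -EINVAL;
    }

    int main(void)
    {
            int tsa[MAX_TCS]  = { TSA_ETS, TSA_ETS }; /* rest strict */
            int good[MAX_TCS] = { 60, 40 };
            int bad[MAX_TCS]  = { 100, 0 };           /* zero ETS share */

            printf("good -> %d\n", validate_ets(tsa, good));
            printf("bad  -> %d\n", validate_ets(tsa, bad));
            return 0;
    }
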
index fc7dcc03b1debd36c6362d887e4f90b3cefab50f..e667a870e0c273e72c49e3708ba0de6370dceaca 100644 (file)
@@ -184,7 +184,9 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
 #define MLX5E_NUM_SQ_STATS(priv) \
        (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
         test_bit(MLX5E_STATE_OPENED, &priv->state))
-#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv))
+#define MLX5E_NUM_PFC_COUNTERS(priv) \
+       (hweight8(mlx5e_query_pfc_combined(priv)) * \
+        NUM_PPORT_PER_PRIO_PFC_COUNTERS)
 
 static int mlx5e_get_sset_count(struct net_device *dev, int sset)
 {
@@ -211,42 +213,41 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
 
        /* SW counters */
        for (i = 0; i < NUM_SW_COUNTERS; i++)
-               strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name);
+               strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
 
        /* Q counters */
        for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
-               strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name);
+               strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
 
        /* VPORT counters */
        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      vport_stats_desc[i].name);
+                      vport_stats_desc[i].format);
 
        /* PPORT counters */
        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      pport_802_3_stats_desc[i].name);
+                      pport_802_3_stats_desc[i].format);
 
        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      pport_2863_stats_desc[i].name);
+                      pport_2863_stats_desc[i].format);
 
        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      pport_2819_stats_desc[i].name);
+                      pport_2819_stats_desc[i].format);
 
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
-                       sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
-                               prio,
-                               pport_per_prio_traffic_stats_desc[i].name);
+                       sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                               pport_per_prio_traffic_stats_desc[i].format, prio);
        }
 
        pfc_combined = mlx5e_query_pfc_combined(priv);
        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
-                       sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
-                               prio, pport_per_prio_pfc_stats_desc[i].name);
+                       sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                               pport_per_prio_pfc_stats_desc[i].format, prio);
                }
        }
 
@@ -256,16 +257,15 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
        /* per channel counters */
        for (i = 0; i < priv->params.num_channels; i++)
                for (j = 0; j < NUM_RQ_STATS; j++)
-                       sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i,
-                               rq_stats_desc[j].name);
+                       sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                               rq_stats_desc[j].format, i);
 
        for (tc = 0; tc < priv->params.num_tc; tc++)
                for (i = 0; i < priv->params.num_channels; i++)
                        for (j = 0; j < NUM_SQ_STATS; j++)
                                sprintf(data + (idx++) * ETH_GSTRING_LEN,
-                                       "tx%d_%s",
-                                       priv->channeltc_to_txq_map[i][tc],
-                                       sq_stats_desc[j].name);
+                                       sq_stats_desc[j].format,
+                                       priv->channeltc_to_txq_map[i][tc]);
 }
 
 static void mlx5e_get_strings(struct net_device *dev,
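
These string hunks move the ring/priority prefix out of the call sites: each counter_desc now carries a complete printf format (the name-to-format rename appears in the en_stats.h hunks below), so the print call needs only the index. A compressed illustration with a hypothetical per-ring table:

    #include <stdio.h>

    #define GSTRING_LEN 32

    struct counter_desc {
            char format[GSTRING_LEN]; /* may hold one %d placeholder */
            int  offset;
    };

    /* Hypothetical per-ring RX counters: the ring index is burned
     * into the string by the descriptor's own format. */
    static const struct counter_desc rq_stats_desc[] = {
            { "rx%d_packets", 0 },
            { "rx%d_bytes",   8 },
    };

    int main(void)
    {
            char data[4][GSTRING_LEN];
            int idx = 0, ring, i;

            for (ring = 0; ring < 2; ring++)
                    for (i = 0; i < 2; i++)
                            snprintf(data[idx++], GSTRING_LEN,
                                     rq_stats_desc[i].format, ring);

            for (i = 0; i < idx; i++)
                    printf("%s\n", data[i]);
            return 0;
    }

The driver uses sprintf() with the same non-literal format; snprintf() here merely bounds the toy buffer.
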
index f5c8d5db25a8cebc367c752c4333bc839b72505a..5a4d88c2cdb292880496831603fb741f5700a880 100644 (file)
 #include "eswitch.h"
 #include "vxlan.h"
 
+enum {
+       MLX5_EN_QP_FLUSH_TIMEOUT_MS     = 5000,
+       MLX5_EN_QP_FLUSH_MSLEEP_QUANT   = 20,
+       MLX5_EN_QP_FLUSH_MAX_ITER       = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
+                                         MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
+};
+
 struct mlx5e_rq_param {
        u32                        rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param       wq;
@@ -74,10 +81,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
        port_state = mlx5_query_vport_state(mdev,
                MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
 
-       if (port_state == VPORT_STATE_UP)
+       if (port_state == VPORT_STATE_UP) {
+               netdev_info(priv->netdev, "Link up\n");
                netif_carrier_on(priv->netdev);
-       else
+       } else {
+               netdev_info(priv->netdev, "Link down\n");
                netif_carrier_off(priv->netdev);
+       }
 }
 
 static void mlx5e_update_carrier_work(struct work_struct *work)
@@ -91,6 +101,26 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
        mutex_unlock(&priv->state_lock);
 }
 
+static void mlx5e_tx_timeout_work(struct work_struct *work)
+{
+       struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+                                              tx_timeout_work);
+       int err;
+
+       rtnl_lock();
+       mutex_lock(&priv->state_lock);
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               goto unlock;
+       mlx5e_close_locked(priv->netdev);
+       err = mlx5e_open_locked(priv->netdev);
+       if (err)
+               netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+                          err);
+unlock:
+       mutex_unlock(&priv->state_lock);
+       rtnl_unlock();
+}
+
 static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 {
        struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -105,11 +135,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 
                s->rx_packets   += rq_stats->packets;
                s->rx_bytes     += rq_stats->bytes;
-               s->lro_packets  += rq_stats->lro_packets;
-               s->lro_bytes    += rq_stats->lro_bytes;
+               s->rx_lro_packets += rq_stats->lro_packets;
+               s->rx_lro_bytes += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
-               s->rx_csum_sw   += rq_stats->csum_sw;
-               s->rx_csum_inner += rq_stats->csum_inner;
+               s->rx_csum_complete += rq_stats->csum_complete;
+               s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_wqe_err   += rq_stats->wqe_err;
                s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
                s->rx_mpwqe_frag   += rq_stats->mpwqe_frag;
@@ -122,24 +152,23 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 
                        s->tx_packets           += sq_stats->packets;
                        s->tx_bytes             += sq_stats->bytes;
-                       s->tso_packets          += sq_stats->tso_packets;
-                       s->tso_bytes            += sq_stats->tso_bytes;
-                       s->tso_inner_packets    += sq_stats->tso_inner_packets;
-                       s->tso_inner_bytes      += sq_stats->tso_inner_bytes;
+                       s->tx_tso_packets       += sq_stats->tso_packets;
+                       s->tx_tso_bytes         += sq_stats->tso_bytes;
+                       s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+                       s->tx_tso_inner_bytes   += sq_stats->tso_inner_bytes;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
-                       s->tx_csum_inner        += sq_stats->csum_offload_inner;
-                       tx_offload_none         += sq_stats->csum_offload_none;
+                       s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+                       tx_offload_none         += sq_stats->csum_none;
                }
        }
 
        /* Update calculated offload counters */
-       s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
-       s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
-                            s->rx_csum_sw;
+       s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+       s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
 
-       s->link_down_events = MLX5_GET(ppcnt_reg,
+       s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                                priv->stats.pport.phy_counters,
                                counter_set.phys_layer_cntrs.link_down_events);
 }
@@ -244,7 +273,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 {
        struct mlx5e_priv *priv = vpriv;
 
-       if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+       if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
                return;
 
        switch (event) {
@@ -260,12 +289,12 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 
 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
 {
-       set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+       set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
 }
 
 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 {
-       clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+       clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
        synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }
 
@@ -306,6 +335,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                }
                rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+               rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
                rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
                rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
@@ -321,6 +351,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                }
                rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
                rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+               rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
 
                rq->wqe_sz = (priv->params.lro_en) ?
                                priv->params.lro_wqe_sz :
@@ -526,17 +557,25 @@ err_destroy_rq:
 
 static void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
+       int tout = 0;
+       int err;
+
        clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
 
-       mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
-       while (!mlx5_wq_ll_is_empty(&rq->wq))
-               msleep(20);
+       err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+       while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
+              tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
+               msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+
+       if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
+               set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
 
        /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
        napi_synchronize(&rq->channel->napi);
 
        mlx5e_disable_rq(rq);
+       mlx5e_free_rx_descs(rq);
        mlx5e_destroy_rq(rq);
 }
 
@@ -580,7 +619,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
        int err;
 
-       err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
+       err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
        if (err)
                return err;
 
@@ -783,6 +822,9 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 
 static void mlx5e_close_sq(struct mlx5e_sq *sq)
 {
+       int tout = 0;
+       int err;
+
        if (sq->txq) {
                clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
                /* prevent netif_tx_wake_queue */
@@ -793,15 +835,24 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
                if (mlx5e_sq_has_room_for(sq, 1))
                        mlx5e_send_nop(sq, true);
 
-               mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+               err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+                                     MLX5_SQC_STATE_ERR);
+               if (err)
+                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
        }
 
-       while (sq->cc != sq->pc) /* wait till sq is empty */
-               msleep(20);
+       /* wait till sq is empty, unless a TX timeout occurred on this SQ */
+       while (sq->cc != sq->pc &&
+              !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
+               msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+               if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
+                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+       }
 
        /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
        napi_synchronize(&sq->channel->napi);
 
+       mlx5e_free_tx_descs(sq);
        mlx5e_disable_sq(sq);
        mlx5e_destroy_sq(sq);
 }
@@ -1297,6 +1348,11 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
                        goto err_close_channels;
        }
 
+       /* FIXME: This is a W/A for tx timeout watch dog false alarm when
+        * polling for inactive tx queues.
+        */
+       netif_tx_start_all_queues(priv->netdev);
+
        kfree(cparam);
        return 0;
 
@@ -1316,6 +1372,12 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
 {
        int i;
 
+       /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
+        * polling for inactive tx queues.
+        */
+       netif_tx_stop_all_queues(priv->netdev);
+       netif_tx_disable(priv->netdev);
+
        for (i = 0; i < priv->params.num_channels; i++)
                mlx5e_close_channel(priv->channel[i]);
 
@@ -1659,8 +1721,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
 
        netdev_set_num_tc(netdev, ntc);
 
+       /* Map netdev TCs to offset 0
+        * We have our own UP to TXQ mapping for QoS
+        */
        for (tc = 0; tc < ntc; tc++)
-               netdev_set_tc_queue(netdev, tc, nch, tc * nch);
+               netdev_set_tc_queue(netdev, tc, nch, 0);
 }
 
 int mlx5e_open_locked(struct net_device *netdev)
@@ -2591,6 +2656,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
        return features;
 }
 
+static void mlx5e_tx_timeout(struct net_device *dev)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       bool sched_work = false;
+       int i;
+
+       netdev_err(dev, "TX timeout detected\n");
+
+       for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
+               struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
+
+               if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+                       continue;
+               sched_work = true;
+               set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+               netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
+                          i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+       }
+
+       if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+               schedule_work(&priv->tx_timeout_work);
+}
+
 static const struct net_device_ops mlx5e_netdev_ops_basic = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
@@ -2608,6 +2696,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
 #endif
+       .ndo_tx_timeout          = mlx5e_tx_timeout,
 };
 
 static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2637,6 +2726,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
        .ndo_get_vf_config       = mlx5e_get_vf_config,
        .ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
        .ndo_get_vf_stats        = mlx5e_get_vf_stats,
+       .ndo_tx_timeout          = mlx5e_tx_timeout,
 };
 
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2839,6 +2929,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 
        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+       INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
 }
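
mlx5e_close_rq() and mlx5e_close_sq() above replace the unbounded "sleep until the ring drains" loops with a budgeted wait -- MLX5_EN_QP_FLUSH_TIMEOUT_MS sliced into MLX5_EN_QP_FLUSH_MSLEEP_QUANT steps -- and flag the ring for forced descriptor cleanup when the budget runs out. A user-space rendering with usleep():

    #include <stdio.h>
    #include <unistd.h>

    enum {
            FLUSH_TIMEOUT_MS = 5000,
            FLUSH_QUANT_MS   = 20,
            FLUSH_MAX_ITER   = FLUSH_TIMEOUT_MS / FLUSH_QUANT_MS,
    };

    static int pending = 3; /* pretend HW still owns a few descriptors */

    static int ring_empty(void)
    {
            if (pending > 0)
                    pending--; /* hardware drains one per poll */
            return pending == 0;
    }

    int main(void)
    {
            int tout = 0;
            int flush_timeout = 0;

            /* Bounded drain: poll in FLUSH_QUANT_MS slices, give up
             * after FLUSH_TIMEOUT_MS worth of them. */
            while (!ring_empty() && tout++ < FLUSH_MAX_ITER)
                    usleep(FLUSH_QUANT_MS * 1000);

            if (tout >= FLUSH_MAX_ITER)
                    flush_timeout = 1; /* MLX5E_RQ_STATE_FLUSH_TIMEOUT */

            printf("drained=%d forced=%d\n", pending == 0, flush_timeout);
            return 0;
    }
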
 
index bd947704b59c41747a71d75725a5d230bde5dfc0..9f2a16a507e04f8cd9861251ab3d0d5973b23e90 100644 (file)
@@ -212,6 +212,20 @@ err_free_skb:
        return -ENOMEM;
 }
 
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+       struct sk_buff *skb = rq->skb[ix];
+
+       if (skb) {
+               rq->skb[ix] = NULL;
+               dma_unmap_single(rq->pdev,
+                                *((dma_addr_t *)skb->cb),
+                                rq->wqe_sz,
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb(skb);
+       }
+}
+
 static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
 {
        return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
@@ -574,6 +588,30 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
        return 0;
 }
 
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+{
+       struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+
+       wi->free_wqe(rq, wi);
+}
+
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+       struct mlx5_wq_ll *wq = &rq->wq;
+       struct mlx5e_rx_wqe *wqe;
+       __be16 wqe_ix_be;
+       u16 wqe_ix;
+
+       while (!mlx5_wq_ll_is_empty(wq)) {
+               wqe_ix_be = *wq->tail_next;
+               wqe_ix    = be16_to_cpu(wqe_ix_be);
+               wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+               rq->dealloc_wqe(rq, wqe_ix);
+               mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+                              &wqe->next.next_wqe_index);
+       }
+}
+
 #define RQ_CANNOT_POST(rq) \
                (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
                 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
@@ -689,7 +727,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
        if (is_first_ethertype_ip(skb)) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
-               rq->stats.csum_sw++;
+               rq->stats.csum_complete++;
                return;
        }
 
@@ -699,7 +737,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                if (cqe_is_tunneled(cqe)) {
                        skb->csum_level = 1;
                        skb->encapsulation = 1;
-                       rq->stats.csum_inner++;
+                       rq->stats.csum_unnecessary_inner++;
                }
                return;
        }
@@ -878,6 +916,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
        int work_done = 0;
 
+       if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+               return 0;
+
        if (cq->decmprs_left)
                work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
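
mlx5e_free_rx_descs() above drains whatever the hardware never completed by walking the linked WQ from the tail and releasing each WQE through the per-ring rq->dealloc_wqe hook, which mlx5e_create_rq() points at either the plain or the multi-packet variant. A small function-pointer dispatch sketch of that shape (all names illustrative):

    #include <stdio.h>

    struct rq;
    typedef void (*dealloc_wqe_fn)(struct rq *rq, int ix);

    struct rq {
            dealloc_wqe_fn dealloc_wqe; /* chosen once at create time */
            int            outstanding; /* stand-in for the linked WQ */
    };

    static void dealloc_plain(struct rq *rq, int ix)
    {
            (void)rq;
            printf("free plain wqe %d\n", ix);
    }

    static void dealloc_mpwqe(struct rq *rq, int ix)
    {
            (void)rq;
            printf("free multi-packet wqe %d\n", ix);
    }

    /* Analogue of mlx5e_free_rx_descs(): pop every outstanding entry,
     * letting the ring's own hook do the type-specific release. */
    static void free_rx_descs(struct rq *rq)
    {
            int ix = 0;

            while (rq->outstanding-- > 0)
                    rq->dealloc_wqe(rq, ix++);
    }

    int main(void)
    {
            struct rq rq = { dealloc_mpwqe, 2 };

            free_rx_descs(&rq);
            rq.dealloc_wqe = dealloc_plain;
            rq.outstanding = 1;
            free_rx_descs(&rq);
            return 0;
    }
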
 
index 83bc32b25849caae0b9422688e09fba1a3928fe2..fcd490cc56109d2279444db47131bb31432f923b 100644 (file)
        be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
 
 #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
 
 struct counter_desc {
-       char            name[ETH_GSTRING_LEN];
+       char            format[ETH_GSTRING_LEN];
        int             offset; /* Byte offset */
 };
 
@@ -53,18 +55,18 @@ struct mlx5e_sw_stats {
        u64 rx_bytes;
        u64 tx_packets;
        u64 tx_bytes;
-       u64 tso_packets;
-       u64 tso_bytes;
-       u64 tso_inner_packets;
-       u64 tso_inner_bytes;
-       u64 lro_packets;
-       u64 lro_bytes;
-       u64 rx_csum_good;
+       u64 tx_tso_packets;
+       u64 tx_tso_bytes;
+       u64 tx_tso_inner_packets;
+       u64 tx_tso_inner_bytes;
+       u64 rx_lro_packets;
+       u64 rx_lro_bytes;
+       u64 rx_csum_unnecessary;
        u64 rx_csum_none;
-       u64 rx_csum_sw;
-       u64 rx_csum_inner;
-       u64 tx_csum_offload;
-       u64 tx_csum_inner;
+       u64 rx_csum_complete;
+       u64 rx_csum_unnecessary_inner;
+       u64 tx_csum_partial;
+       u64 tx_csum_partial_inner;
        u64 tx_queue_stopped;
        u64 tx_queue_wake;
        u64 tx_queue_dropped;
@@ -76,7 +78,7 @@ struct mlx5e_sw_stats {
        u64 rx_cqe_compress_pkts;
 
        /* Special handling counters */
-       u64 link_down_events;
+       u64 link_down_events_phy;
 };
 
 static const struct counter_desc sw_stats_desc[] = {
@@ -84,18 +86,18 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
@@ -105,7 +107,7 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
 };
 
 struct mlx5e_qcounter_stats {
@@ -125,12 +127,6 @@ struct mlx5e_vport_stats {
 };
 
 static const struct counter_desc vport_stats_desc[] = {
-       { "rx_vport_error_packets",
-               VPORT_COUNTER_OFF(received_errors.packets) },
-       { "rx_vport_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) },
-       { "tx_vport_error_packets",
-               VPORT_COUNTER_OFF(transmit_errors.packets) },
-       { "tx_vport_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) },
        { "rx_vport_unicast_packets",
                VPORT_COUNTER_OFF(received_eth_unicast.packets) },
        { "rx_vport_unicast_bytes",
@@ -192,94 +188,68 @@ struct mlx5e_pport_stats {
 };
 
 static const struct counter_desc pport_802_3_stats_desc[] = {
-       { "frames_tx", PPORT_802_3_OFF(a_frames_transmitted_ok) },
-       { "frames_rx", PPORT_802_3_OFF(a_frames_received_ok) },
-       { "check_seq_err", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
-       { "alignment_err", PPORT_802_3_OFF(a_alignment_errors) },
-       { "octets_tx", PPORT_802_3_OFF(a_octets_transmitted_ok) },
-       { "octets_received", PPORT_802_3_OFF(a_octets_received_ok) },
-       { "multicast_xmitted", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
-       { "broadcast_xmitted", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
-       { "multicast_rx", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
-       { "broadcast_rx", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
-       { "in_range_len_errors", PPORT_802_3_OFF(a_in_range_length_errors) },
-       { "out_of_range_len", PPORT_802_3_OFF(a_out_of_range_length_field) },
-       { "too_long_errors", PPORT_802_3_OFF(a_frame_too_long_errors) },
-       { "symbol_err", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
-       { "mac_control_tx", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
-       { "mac_control_rx", PPORT_802_3_OFF(a_mac_control_frames_received) },
-       { "unsupported_op_rx",
-               PPORT_802_3_OFF(a_unsupported_opcodes_received) },
-       { "pause_ctrl_rx", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
-       { "pause_ctrl_tx",
-               PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+       { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+       { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+       { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+       { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+       { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+       { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+       { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+       { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+       { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+       { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+       { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+       { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+       { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+       { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+       { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+       { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+       { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+       { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
 };
 
 static const struct counter_desc pport_2863_stats_desc[] = {
-       { "in_octets", PPORT_2863_OFF(if_in_octets) },
-       { "in_ucast_pkts", PPORT_2863_OFF(if_in_ucast_pkts) },
-       { "in_discards", PPORT_2863_OFF(if_in_discards) },
-       { "in_errors", PPORT_2863_OFF(if_in_errors) },
-       { "in_unknown_protos", PPORT_2863_OFF(if_in_unknown_protos) },
-       { "out_octets", PPORT_2863_OFF(if_out_octets) },
-       { "out_ucast_pkts", PPORT_2863_OFF(if_out_ucast_pkts) },
-       { "out_discards", PPORT_2863_OFF(if_out_discards) },
-       { "out_errors", PPORT_2863_OFF(if_out_errors) },
-       { "in_multicast_pkts", PPORT_2863_OFF(if_in_multicast_pkts) },
-       { "in_broadcast_pkts", PPORT_2863_OFF(if_in_broadcast_pkts) },
-       { "out_multicast_pkts", PPORT_2863_OFF(if_out_multicast_pkts) },
-       { "out_broadcast_pkts", PPORT_2863_OFF(if_out_broadcast_pkts) },
+       { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+       { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+       { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
 };
 
 static const struct counter_desc pport_2819_stats_desc[] = {
-       { "drop_events", PPORT_2819_OFF(ether_stats_drop_events) },
-       { "octets", PPORT_2819_OFF(ether_stats_octets) },
-       { "pkts", PPORT_2819_OFF(ether_stats_pkts) },
-       { "broadcast_pkts", PPORT_2819_OFF(ether_stats_broadcast_pkts) },
-       { "multicast_pkts", PPORT_2819_OFF(ether_stats_multicast_pkts) },
-       { "crc_align_errors", PPORT_2819_OFF(ether_stats_crc_align_errors) },
-       { "undersize_pkts", PPORT_2819_OFF(ether_stats_undersize_pkts) },
-       { "oversize_pkts", PPORT_2819_OFF(ether_stats_oversize_pkts) },
-       { "fragments", PPORT_2819_OFF(ether_stats_fragments) },
-       { "jabbers", PPORT_2819_OFF(ether_stats_jabbers) },
-       { "collisions", PPORT_2819_OFF(ether_stats_collisions) },
-       { "p64octets", PPORT_2819_OFF(ether_stats_pkts64octets) },
-       { "p65to127octets", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
-       { "p128to255octets", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
-       { "p256to511octets", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
-       { "p512to1023octets", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
-       { "p1024to1518octets",
-               PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
-       { "p1519to2047octets",
-               PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
-       { "p2048to4095octets",
-               PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
-       { "p4096to8191octets",
-               PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
-       { "p8192to10239octets",
-               PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+       { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+       { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+       { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+       { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+       { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+       { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+       { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+       { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+       { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+       { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+       { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+       { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+       { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
 };
 
 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
-       { "rx_octets", PPORT_PER_PRIO_OFF(rx_octets) },
-       { "rx_frames", PPORT_PER_PRIO_OFF(rx_frames) },
-       { "tx_octets", PPORT_PER_PRIO_OFF(tx_octets) },
-       { "tx_frames", PPORT_PER_PRIO_OFF(tx_frames) },
+       { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+       { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+       { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+       { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
 };
 
 static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
-       { "rx_pause", PPORT_PER_PRIO_OFF(rx_pause) },
-       { "rx_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
-       { "tx_pause", PPORT_PER_PRIO_OFF(tx_pause) },
-       { "tx_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
-       { "rx_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+       { "rx_prio%d_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+       { "rx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+       { "tx_prio%d_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+       { "tx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+       { "rx_prio%d_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
 };
 
 struct mlx5e_rq_stats {
        u64 packets;
        u64 bytes;
-       u64 csum_sw;
-       u64 csum_inner;
+       u64 csum_complete;
+       u64 csum_unnecessary_inner;
        u64 csum_none;
        u64 lro_packets;
        u64 lro_bytes;
@@ -292,19 +262,19 @@ struct mlx5e_rq_stats {
 };
 
 static const struct counter_desc rq_stats_desc[] = {
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
 };
 
 struct mlx5e_sq_stats {
@@ -315,28 +285,28 @@ struct mlx5e_sq_stats {
        u64 tso_bytes;
        u64 tso_inner_packets;
        u64 tso_inner_bytes;
-       u64 csum_offload_inner;
+       u64 csum_partial_inner;
        u64 nop;
        /* less likely accessed in data path */
-       u64 csum_offload_none;
+       u64 csum_none;
        u64 stopped;
        u64 wake;
        u64 dropped;
 };
 
 static const struct counter_desc sq_stats_desc[] = {
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_inner) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, nop) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_none) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, stopped) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, wake) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, dropped) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
 };
 
 #define NUM_SW_COUNTERS                        ARRAY_SIZE(sw_stats_desc)
index b000ddc29553bb40e297f075c1a3f7606487a87e..5740b465ef8430f359ed218adf9f93bce9c46e8a 100644
@@ -110,8 +110,20 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb);
-       int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
-                skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
+       int up = 0;
+
+       if (!netdev_get_num_tc(dev))
+               return channel_ix;
+
+       if (skb_vlan_tag_present(skb))
+               up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+
+       /* channel_ix can be larger than num_channels since
+        * dev->num_real_tx_queues = num_channels * num_tc
+        */
+       if (channel_ix >= priv->params.num_channels)
+               channel_ix = reciprocal_scale(channel_ix,
+                                             priv->params.num_channels);
 
        return priv->channeltc_to_txq_map[channel_ix][up];
 }
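
The fix above leans on reciprocal_scale() to pull an out-of-range channel_ix back
into [0, num_channels). The kernel helper in include/linux/kernel.h is a one-line
multiplicative range reduction; a standalone sketch of what it computes:

    #include <stdint.h>
    #include <stdio.h>

    /* Same expression as the kernel's reciprocal_scale(): maps val from
     * [0, 2^32) onto [0, ep_ro) with one multiply and a shift, no division.
     * It spreads evenly only when val spans the full 32-bit range (e.g. a
     * hash); small inputs all collapse toward 0. */
    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
            return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
            uint32_t samples[] = { 0x12345678u, 0x9e3779b9u, 0xdeadbeefu };

            for (int i = 0; i < 3; i++)     /* every result lands in [0, 8) */
                    printf("0x%08x -> %u\n", samples[i],
                           reciprocal_scale(samples[i], 8));
            return 0;
    }
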
@@ -123,7 +135,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
         * headers and occur before the data gather.
         * Therefore these headers must be copied into the WQE
         */
-#define MLX5E_MIN_INLINE ETH_HLEN
+#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
 
        if (bf) {
                u16 ihs = skb_headlen(skb);
@@ -135,7 +147,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
                        return skb_headlen(skb);
        }
 
-       return MLX5E_MIN_INLINE;
+       return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
 }
 
 static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
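
With concrete numbers the new bound is easy to check: ETH_HLEN is 14 and VLAN_HLEN
is 4 in the mainline headers, so the floor grows from 14 to 18 bytes, and taking
max() with skb_network_offset() inlines whatever L2 header the frame actually
carries. A worked illustration (offsets are the usual values, not from this diff):

    /* untagged frame:  network offset 14 -> max(14, 18) = 18 bytes inlined
     * VLAN-tagged:     network offset 18 -> max(18, 18) = 18 bytes inlined
     * QinQ, two tags:  network offset 22 -> max(22, 18) = 22 bytes inlined
     *
     * The old fixed ETH_HLEN floor could leave a VLAN tag outside the
     * inlined headers.
     */
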
@@ -192,12 +204,12 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                if (skb->encapsulation) {
                        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                          MLX5_ETH_WQE_L4_INNER_CSUM;
-                       sq->stats.csum_offload_inner++;
+                       sq->stats.csum_partial_inner++;
                } else {
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                }
        } else
-               sq->stats.csum_offload_none++;
+               sq->stats.csum_none++;
 
        if (sq->cc != sq->prev_cc) {
                sq->prev_cc = sq->cc;
@@ -341,6 +353,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
        return mlx5e_sq_xmit(sq, skb);
 }
 
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+       struct mlx5e_tx_wqe_info *wi;
+       struct sk_buff *skb;
+       u16 ci;
+       int i;
+
+       while (sq->cc != sq->pc) {
+               ci = sq->cc & sq->wq.sz_m1;
+               skb = sq->skb[ci];
+               wi = &sq->wqe_info[ci];
+
+               if (!skb) { /* nop */
+                       sq->cc++;
+                       continue;
+               }
+
+               for (i = 0; i < wi->num_dma; i++) {
+                       struct mlx5e_sq_dma *dma =
+                               mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+                       mlx5e_tx_dma_unmap(sq->pdev, dma);
+               }
+
+               dev_kfree_skb_any(skb);
+               sq->cc += wi->num_wqebbs;
+       }
+}
+
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
        struct mlx5e_sq *sq;
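
mlx5e_free_tx_descs() walks the send queue from the consumer counter (cc) to the
producer counter (pc), masking with sz_m1 (ring size minus one, a power of two).
A minimal userspace sketch of that free-running-counter ring discipline, with
hypothetical names standing in for the driver's fields:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8                     /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)       /* plays the role of sq->wq.sz_m1 */

    struct ring {
            uint16_t cc;                    /* consumer counter, free-running */
            uint16_t pc;                    /* producer counter, free-running */
            int entries[RING_SIZE];
    };

    /* Drain everything between cc and pc: the 16-bit counters wrap
     * naturally and only the masked low bits index the ring, so cc != pc
     * is the "not empty" test even across the wrap. */
    static void drain(struct ring *r)
    {
            while (r->cc != r->pc) {
                    uint16_t ci = r->cc & RING_MASK;

                    printf("freeing slot %u (entry %d)\n", ci, r->entries[ci]);
                    r->cc++;        /* the driver advances by wi->num_wqebbs */
            }
    }

    int main(void)
    {
            struct ring r = { .cc = 65534, .pc = 2 };   /* spans the 2^16 wrap */

            for (uint16_t i = r.cc; i != r.pc; i++)
                    r.entries[i & RING_MASK] = (int)i;
            drain(&r);
            return 0;
    }
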
@@ -352,6 +393,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
        sq = container_of(cq, struct mlx5e_sq, cq);
 
+       if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+               return false;
+
        npkts = 0;
        nbytes = 0;
 
index 42d16b9458e485205a344ab9a988caf7ae185632..96a59463ae65f1773856949b17a9224531cfe930 100644
@@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev)
 
 void mlx5_enter_error_state(struct mlx5_core_dev *dev)
 {
+       mutex_lock(&dev->intf_state_mutex);
        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
-               return;
+               goto unlock;
 
        mlx5_core_err(dev, "start\n");
-       if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+       if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
                dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+               trigger_cmd_completions(dev);
+       }
 
        mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
        mlx5_core_err(dev, "end\n");
+
+unlock:
+       mutex_unlock(&dev->intf_state_mutex);
 }
 
 static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
@@ -245,7 +251,6 @@ static void poll_health(unsigned long data)
        u32 count;
 
        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-               trigger_cmd_completions(dev);
                mod_timer(&health->timer, get_next_poll_jiffies());
                return;
        }
index a19b59348dd685816c88736a4d9e47f78b3f847c..6695893ddd2d407743e8329959a05840c28ad3f1 100644
@@ -1422,46 +1422,31 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
        mlx5_pci_err_detected(dev->pdev, 0);
 }
 
-/* wait for the device to show vital signs. For now we check
- * that we can read the device ID and that the health buffer
- * shows a non zero value which is different than 0xffffffff
+/* wait for the device to show vital signs: the health
+ * counter must be observed to start counting.
  */
-static void wait_vital(struct pci_dev *pdev)
+static int wait_vital(struct pci_dev *pdev)
 {
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_core_health *health = &dev->priv.health;
        const int niter = 100;
+       u32 last_count = 0;
        u32 count;
-       u16 did;
        int i;
 
-       /* Wait for firmware to be ready after reset */
-       msleep(1000);
-       for (i = 0; i < niter; i++) {
-               if (pci_read_config_word(pdev, 2, &did)) {
-                       dev_warn(&pdev->dev, "failed reading config word\n");
-                       break;
-               }
-               if (did == pdev->device) {
-                       dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
-                       break;
-               }
-               msleep(50);
-       }
-       if (i == niter)
-               dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
-
        for (i = 0; i < niter; i++) {
                count = ioread32be(health->health_counter);
                if (count && count != 0xffffffff) {
-                       dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
-                       break;
+                       if (last_count && last_count != count) {
+                               dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+                               return 0;
+                       }
+                       last_count = count;
                }
                msleep(50);
        }
 
-       if (i == niter)
-               dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+       return -ETIMEDOUT;
 }
 
 static void mlx5_pci_resume(struct pci_dev *pdev)
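
The rewritten wait_vital() no longer trusts a single plausible read of the health
counter: it requires two valid samples with different values, i.e. the counter is
actually ticking, before declaring the device alive, and returns -ETIMEDOUT
otherwise. A generic sketch of that progress test, with hypothetical read_counter()
and sleep_ms() standing in for ioread32be() and msleep():

    #include <errno.h>
    #include <stdint.h>

    extern uint32_t read_counter(void);     /* hypothetical device accessor */
    extern void sleep_ms(unsigned int ms);  /* hypothetical delay */

    /* Succeed only when the counter is seen to advance between two valid
     * samples; 0 and 0xffffffff are treated as "not readable yet". */
    static int wait_for_progress(int max_iters)
    {
            uint32_t last = 0;

            for (int i = 0; i < max_iters; i++) {
                    uint32_t now = read_counter();

                    if (now && now != 0xffffffffu) {
                            if (last && last != now)
                                    return 0;   /* it ticked: device is alive */
                            last = now;
                    }
                    sleep_ms(50);
            }
            return -ETIMEDOUT;
    }
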
@@ -1473,7 +1458,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
        dev_info(&pdev->dev, "%s was called\n", __func__);
 
        pci_save_state(pdev);
-       wait_vital(pdev);
+       err = wait_vital(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+               return;
+       }
 
        err = mlx5_load_one(dev, priv);
        if (err)
@@ -1508,8 +1497,9 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},   /* ConnectX-4 VF */
        { PCI_VDEVICE(MELLANOX, 0x1015) },                      /* ConnectX-4LX */
        { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},   /* ConnectX-4LX VF */
-       { PCI_VDEVICE(MELLANOX, 0x1017) },                      /* ConnectX-5 */
+       { PCI_VDEVICE(MELLANOX, 0x1017) },                      /* ConnectX-5, PCIe 3.0 */
        { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},   /* ConnectX-5 VF */
+       { PCI_VDEVICE(MELLANOX, 0x1019) },                      /* ConnectX-5, PCIe 4.0 */
        { 0, }
 };
 
index 9eeee0545f1cf294ecddb7bb0becc873df8906da..32dea3524cee3ed4d984eb5aafc7a06abdbd713b 100644
@@ -345,7 +345,6 @@ retry:
                               func_id, npages, err);
                goto out_4k;
        }
-       dev->priv.fw_pages += npages;
 
        err = mlx5_cmd_status_to_err(&out.hdr);
        if (err) {
@@ -373,6 +372,33 @@ out_free:
        return err;
 }
 
+static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+                            struct mlx5_manage_pages_inbox *in, int in_size,
+                            struct mlx5_manage_pages_outbox *out, int out_size)
+{
+       struct fw_page *fwp;
+       struct rb_node *p;
+       u32 npages;
+       u32 i = 0;
+
+       if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+               return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
+                                                 (u32 *)out, out_size);
+
+       npages = be32_to_cpu(in->num_entries);
+
+       p = rb_first(&dev->priv.page_root);
+       while (p && i < npages) {
+               fwp = rb_entry(p, struct fw_page, rb_node);
+               out->pas[i] = cpu_to_be64(fwp->addr);
+               p = rb_next(p);
+               i++;
+       }
+
+       out->num_entries = cpu_to_be32(i);
+       return 0;
+}
+
 static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                         int *nclaimed)
 {
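
reclaim_pages_cmd() above captures a useful teardown pattern: once the device is in
the internal-error state the firmware can no longer answer, so the driver fabricates
the command response from its own page bookkeeping. A stripped-down sketch of the
same idea, where a singly linked list stands in for the dev->priv.page_root rb-tree
and the normal-path command is elided:

    #include <stdint.h>

    enum dev_state { DEV_OK, DEV_INTERNAL_ERROR };

    struct fw_page {
            uint64_t addr;
            struct fw_page *next;   /* stands in for the rb-tree links */
    };

    struct dev {
            enum dev_state state;
            struct fw_page *pages;  /* stands in for dev->priv.page_root */
    };

    /* Fill 'pas' with up to npages addresses. Normally the firmware
     * supplies them; in the error state the reply is synthesized from
     * local state so teardown can still make progress. */
    static int fake_reclaim(struct dev *dev, uint32_t npages, uint64_t *pas)
    {
            uint32_t i = 0;

            if (dev->state != DEV_INTERNAL_ERROR)
                    return -1;      /* the real firmware command would run here */

            for (struct fw_page *p = dev->pages; p && i < npages; p = p->next)
                    pas[i++] = p->addr;

            return (int)i;          /* number of entries "reclaimed" */
    }
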
@@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        in.func_id = cpu_to_be16(func_id);
        in.num_entries = cpu_to_be32(npages);
        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
-       err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
+       err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
        if (err) {
-               mlx5_core_err(dev, "failed reclaiming pages\n");
-               goto out_free;
-       }
-       dev->priv.fw_pages -= npages;
-
-       if (out->hdr.status) {
-               err = mlx5_cmd_status_to_err(&out->hdr);
+               mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
                goto out_free;
        }
 
@@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                err = -EINVAL;
                goto out_free;
        }
-       if (nclaimed)
-               *nclaimed = num_claimed;
 
        for (i = 0; i < num_claimed; i++) {
                addr = be64_to_cpu(out->pas[i]);
                free_4k(dev, addr);
        }
+
+       if (nclaimed)
+               *nclaimed = num_claimed;
+
        dev->priv.fw_pages -= num_claimed;
        if (func_id)
                dev->priv.vfs_pages -= num_claimed;
@@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
                p = rb_first(&dev->priv.page_root);
                if (p) {
                        fwp = rb_entry(p, struct fw_page, rb_node);
-                       if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-                               free_4k(dev, fwp->addr);
-                               nclaimed = 1;
-                       } else {
-                               err = reclaim_pages(dev, fwp->func_id,
-                                                   optimal_reclaimed_pages(),
-                                                   &nclaimed);
-                       }
+                       err = reclaim_pages(dev, fwp->func_id,
+                                           optimal_reclaimed_pages(),
+                                           &nclaimed);
+
                        if (err) {
                                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
                                               err);
@@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
                }
        } while (p);
 
+       WARN(dev->priv.fw_pages,
+            "FW pages counter is %d after reclaiming all pages\n",
+            dev->priv.fw_pages);
+       WARN(dev->priv.vfs_pages,
+            "VFs FW pages counter is %d after reclaiming all pages\n",
+            dev->priv.vfs_pages);
+
        return 0;
 }
 
index daf44cd4c566f45649b9efe1e4417b4e868c9048..91846dfcbe9cf1b39f0fbf6af8856ad4771dd1ff 100644
@@ -513,7 +513,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
 {
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        void *nic_vport_context;
-       u8 *guid;
        void *in;
        int err;
 
@@ -535,8 +534,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
 
        nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
                                         in, nic_vport_context);
-       guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
-                           node_guid);
        MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
 
        err = mlx5_modify_nic_vport_context(mdev, in, inlen);
index f2fd1ef16da7eba68deb5af1648b7ff28cf7ff44..e25a73ed2981848efa6175a6eeb43ad6891df1ec 100644
@@ -72,8 +72,8 @@ static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
        u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
        u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
 
-       memset(&in, 0, sizeof(in));
-       memset(&out, 0, sizeof(out));
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
 
        MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
                 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
@@ -105,6 +105,9 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
        struct mlx5e_vxlan *vxlan;
        int err;
 
+       if (mlx5e_vxlan_lookup_port(priv, port))
+               goto free_work;
+
        if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
                goto free_work;
 
index ce21ee5b23577ee63e8b5679a1fe2204c3ccd895..821a087c7ae221200f1a79c54ef399910325f644 100644
@@ -75,14 +75,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 
        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
-               mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+               mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
                return err;
        }
 
        err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
                                  &wq_ctrl->buf, param->buf_numa_node);
        if (err) {
-               mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+               mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
                goto err_db_free;
        }
 
@@ -111,14 +111,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 
        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
-               mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+               mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
                return err;
        }
 
        err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
                                  &wq_ctrl->buf, param->buf_numa_node);
        if (err) {
-               mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+               mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
                goto err_db_free;
        }
 
@@ -148,13 +148,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 
        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
-               mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+               mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
                return err;
        }
 
-       err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+       err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+                                 &wq_ctrl->buf, param->buf_numa_node);
        if (err) {
-               mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+               mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
                goto err_db_free;
        }
 
index 1977e7a5c5301cc591fade66cb290eb7dbd163e1..57d48da709fb77076c2cd737ad87c02ef0ff3f90 100644
@@ -2718,7 +2718,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
  * Configures the switch priority to buffer table.
  */
 #define MLXSW_REG_PPTB_ID 0x500B
-#define MLXSW_REG_PPTB_LEN 0x0C
+#define MLXSW_REG_PPTB_LEN 0x10
 
 static const struct mlxsw_reg_info mlxsw_reg_pptb = {
        .id = MLXSW_REG_PPTB_ID,
@@ -2784,6 +2784,13 @@ MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
  */
 MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
 
+/* reg_pptb_prio_to_buff_msb
+ * Mapping of switch priority <i+8> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
+
 #define MLXSW_REG_PPTB_ALL_PRIO 0xFF
 
 static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
@@ -2792,6 +2799,14 @@ static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
        mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
        mlxsw_reg_pptb_local_port_set(payload, local_port);
        mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+       mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+}
+
+static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
+                                                   u8 buff)
+{
+       mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
+       mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
 }
 
 /* PBMC - Port Buffer Management Control Register
index 6f9e3ddff4a8df98dd95818db7e722aaf8cb76a9..374080027b2f2da103a5a2e89b16f16e689239ec 100644
@@ -171,23 +171,6 @@ static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
 }
 
-static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
-                                        bool *p_is_up)
-{
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       char paos_pl[MLXSW_REG_PAOS_LEN];
-       u8 oper_status;
-       int err;
-
-       mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
-       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
-       if (err)
-               return err;
-       oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
-       *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
-       return 0;
-}
-
 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      unsigned char *addr)
 {
@@ -408,7 +391,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
        }
 
        mlxsw_sp_txhdr_construct(skb, &tx_info);
-       len = skb->len;
+       /* TX header is consumed by HW on the way, so we shouldn't count its
+        * bytes as being sent.
+        */
+       len = skb->len - MLXSW_TXHDR_LEN;
+
        /* Due to a race we might fail here because of a full queue. In that
         * unlikely case we simply drop the packet.
         */
@@ -1430,7 +1417,8 @@ static int mlxsw_sp_port_get_settings(struct net_device *dev,
 
        cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
                         mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
-                        SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+                        SUPPORTED_Pause | SUPPORTED_Asym_Pause |
+                        SUPPORTED_Autoneg;
        cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
        mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
                                        eth_proto_oper, cmd);
@@ -1489,7 +1477,6 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
        u32 eth_proto_new;
        u32 eth_proto_cap;
        u32 eth_proto_admin;
-       bool is_up;
        int err;
 
        speed = ethtool_cmd_speed(cmd);
@@ -1521,12 +1508,7 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
                return err;
        }
 
-       err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
-       if (err) {
-               netdev_err(dev, "Failed to get oper status");
-               return err;
-       }
-       if (!is_up)
+       if (!netif_running(dev))
                return 0;
 
        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
index a3720a0fad7d6ff0573256bc95d081e8d25ff2e1..074cdda7b6f337a6985e10a8d3620dd2825d2f3e 100644
@@ -194,7 +194,7 @@ static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
 
        mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-               mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, 0);
+               mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
        return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
                               pptb_pl);
 }
index 0b323661c0b6b3122c9aba83fc8d0e3d0f2e68ad..01cfb75128278ca2a1b263636c62314d5bc3d1c2 100644
@@ -103,7 +103,8 @@ static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
 
        mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-               mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, prio_tc[i]);
+               mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
+
        return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
                               pptb_pl);
 }
@@ -249,6 +250,7 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
                return err;
 
        memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
+       mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
 
        return 0;
 }
@@ -351,7 +353,8 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        int err;
 
-       if (mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) {
+       if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
+           pfc->pfc_en) {
                netdev_err(dev, "PAUSE frames already enabled on port\n");
                return -EINVAL;
        }
@@ -371,6 +374,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
        }
 
        memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
+       mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
 
        return 0;
 
index 3842eab9449a4fd294ac7a24d02c6b48de4a4802..25f658b3849a6beb6c1febc9ba5beab66bec5472 100644
@@ -316,7 +316,10 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
                }
        }
        mlxsw_sx_txhdr_construct(skb, &tx_info);
-       len = skb->len;
+       /* TX header is consumed by HW on the way, so we shouldn't count its
+        * bytes as being sent.
+        */
+       len = skb->len - MLXSW_TXHDR_LEN;
        /* Due to a race we might fail here because of a full queue. In that
         * unlikely case we simply drop the packet.
         */
index 7066954c39d682fe229bf8c151ac46047c4eae77..0a26b11ca8f61eff9f7c5b880b770a99948f07bf 100644
@@ -1151,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
                        enc28j60_phy_read(priv, PHIR);
                }
                /* TX complete handler */
-               if ((intflags & EIR_TXIF) != 0) {
+               if (((intflags & EIR_TXIF) != 0) &&
+                   ((intflags & EIR_TXERIF) == 0)) {
                        bool err = false;
                        loop++;
                        if (netif_msg_intr(priv))
@@ -1203,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
                                        enc28j60_tx_clear(ndev, true);
                        } else
                                enc28j60_tx_clear(ndev, true);
-                       locked_reg_bfclr(priv, EIR, EIR_TXERIF);
+                       locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
                }
                /* RX Error handler */
                if ((intflags & EIR_RXERIF) != 0) {
@@ -1238,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
  */
 static void enc28j60_hw_tx(struct enc28j60_net *priv)
 {
+       BUG_ON(!priv->tx_skb);
+
        if (netif_msg_tx_queued(priv))
                printk(KERN_DEBUG DRV_NAME
                        ": Tx Packet Len:%d\n", priv->tx_skb->len);
index fa47c14c743ad811252c753302d4b52e944123f8..ba26bb356b8dff20f3ef6e9efffc089d96c30281 100644
@@ -2015,7 +2015,7 @@ static void nfp_net_open_stack(struct nfp_net *nn)
 
        netif_tx_wake_all_queues(nn->netdev);
 
-       enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+       enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
        nfp_net_read_link_status(nn);
 }
 
@@ -2044,7 +2044,7 @@ static int nfp_net_netdev_open(struct net_device *netdev)
                                      NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
        if (err)
                goto err_free_exn;
-       disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+       disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
 
        nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
                               GFP_KERNEL);
@@ -2133,7 +2133,7 @@ static void nfp_net_close_stack(struct nfp_net *nn)
 {
        unsigned int r;
 
-       disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+       disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
        netif_carrier_off(nn->netdev);
        nn->link_up = false;
 
index 9afc15fdbb02af0762a2b46f1ed5fde59322e846..e29ed5a69566357d144b1997151559792f66a656 100644
@@ -3700,6 +3700,7 @@ struct public_port {
 #define MEDIA_DA_TWINAX         0x3
 #define MEDIA_BASE_T            0x4
 #define MEDIA_SFP_1G_FIBER      0x5
+#define MEDIA_MODULE_FIBER      0x6
 #define MEDIA_KR                0xf0
 #define MEDIA_NOT_PRESENT       0xff
 
index 8fba87dd48afaf6ce9ce756bdb24112e7d921176..aada4c7e095f2130c688e30384cdb29b944cc9d5 100644
@@ -72,6 +72,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
        p_ramrod->mtu                   = cpu_to_le16(p_params->mtu);
        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
        p_ramrod->drop_ttl0_en          = p_params->drop_ttl0;
+       p_ramrod->untagged              = p_params->only_untagged;
 
        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
@@ -247,10 +248,6 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
                SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
                          !!(accept_filter & QED_ACCEPT_NONE));
 
-               SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
-                         (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
-                          !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
-
                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
                          !!(accept_filter & QED_ACCEPT_NONE));
 
@@ -1748,7 +1745,8 @@ static int qed_start_vport(struct qed_dev *cdev,
                           start.vport_id, start.mtu);
        }
 
-       qed_reset_vport_stats(cdev);
+       if (params->clear_stats)
+               qed_reset_vport_stats(cdev);
 
        return 0;
 }
index 61cc6869fa6508f549d59bc9a525e5e3901d73a7..c7e01b3035407e83c8c0c2e8216275ae9c8811aa 100644
@@ -1085,6 +1085,7 @@ static int qed_get_port_type(u32 media_type)
        case MEDIA_SFPP_10G_FIBER:
        case MEDIA_SFP_1G_FIBER:
        case MEDIA_XFP_FIBER:
+       case MEDIA_MODULE_FIBER:
        case MEDIA_KR:
                port_type = PORT_FIBRE;
                break;
index acac6626a1b29417eacdbda3751cee4cb241ce9a..b122f6013b6c1d786dcf8bb6b4b1bcea914c31e3 100644
@@ -213,19 +213,15 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
-       /* validate producer is up to-date */
-       rmb();
-
        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
 
-       /* do not reorder */
-       barrier();
+       /* make sure the SPQE is updated before the doorbell */
+       wmb();
 
        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
 
        /* make sure doorbell is rang */
-       mmiowb();
+       wmb();
 
        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
@@ -614,7 +610,9 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 
                        *p_en2 = *p_ent;
 
-                       kfree(p_ent);
+                       /* EBLOCK is responsible for freeing the allocated p_ent */
+                       if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+                               kfree(p_ent);
 
                        p_ent = p_en2;
                }
@@ -749,6 +747,15 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+
+               if (p_ent->queue == &p_spq->unlimited_pending) {
+                       /* This is an allocated p_ent that does not need to
+                        * be returned to the pool.
+                        */
+                       kfree(p_ent);
+                       return rc;
+               }
+
                if (rc)
                        goto spq_post_fail2;
 
@@ -844,8 +851,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
 
-       if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
-               /* EBLOCK is responsible for freeing its own entry */
+       if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+           (found->queue == &p_spq->unlimited_pending))
+               /* EBLOCK is responsible for returning its own entry into the
+                * free list, unless it originally added the entry into the
+                * unlimited pending list.
+                */
                qed_spq_return_entry(p_hwfn, found);
 
        /* Attempt to post pending requests */
index 5733d188822348c45c5a4f23cd245a0e3cfc8188..f8e11f953acb060b4501ab03da0743a7dc548dab 100644
@@ -3231,7 +3231,7 @@ static int qede_stop_queues(struct qede_dev *edev)
        return rc;
 }
 
-static int qede_start_queues(struct qede_dev *edev)
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 {
        int rc, tc, i;
        int vlan_removal_en = 1;
@@ -3462,6 +3462,7 @@ out:
 
 enum qede_load_mode {
        QEDE_LOAD_NORMAL,
+       QEDE_LOAD_RELOAD,
 };
 
 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
@@ -3500,7 +3501,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
                goto err3;
        DP_INFO(edev, "Setup IRQs succeeded\n");
 
-       rc = qede_start_queues(edev);
+       rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
        if (rc)
                goto err4;
        DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
@@ -3555,7 +3556,7 @@ void qede_reload(struct qede_dev *edev,
        if (func)
                func(edev, args);
 
-       qede_load(edev, QEDE_LOAD_NORMAL);
+       qede_load(edev, QEDE_LOAD_RELOAD);
 
        mutex_lock(&edev->qede_lock);
        qede_config_rx_mode(edev->ndev);
index 7bd6f25b4625f3bd634f9ac519127edca0a4fbce..87c642d3b075b2bc9845ba2cdbb4204e788028c3 100644
@@ -772,6 +772,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        tx_ring->tx_stats.tx_bytes += skb->len;
        tx_ring->tx_stats.xmit_called++;
 
+       /* Ensure writes are complete before HW fetches Tx descriptors */
+       wmb();
        qlcnic_update_cmd_producer(tx_ring);
 
        return NETDEV_TX_OK;
@@ -2220,7 +2222,7 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
        if (!opcode)
                return;
 
-       ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+       ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
        qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
        desc = &sds_ring->desc_head[consumer];
        desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
index 133e9e35be9e9212bf14531bffd1c0cf78fde742..4c83739d158f1d36714b40d4eedfbec34c942882 100644
@@ -104,7 +104,8 @@ int efx_farch_test_registers(struct efx_nic *efx,
                             const struct efx_farch_register_test *regs,
                             size_t n_regs)
 {
-       unsigned address = 0, i, j;
+       unsigned address = 0;
+       int i, j;
        efx_oword_t mask, imask, original, reg, buf;
 
        for (i = 0; i < n_regs; ++i) {
index 8af25563f627034e49be2304ad8531ed6c19bd99..b5ab5e120bca3a8631e3634134f740449773e4b2 100644
@@ -116,7 +116,6 @@ struct smsc911x_data {
 
        struct phy_device *phy_dev;
        struct mii_bus *mii_bus;
-       int phy_irq[PHY_MAX_ADDR];
        unsigned int using_extphy;
        int last_duplex;
        int last_carrier;
@@ -1073,7 +1072,6 @@ static int smsc911x_mii_init(struct platform_device *pdev,
        pdata->mii_bus->priv = pdata;
        pdata->mii_bus->read = smsc911x_mii_read;
        pdata->mii_bus->write = smsc911x_mii_write;
-       memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->mii_bus));
 
        pdata->mii_bus->parent = &pdev->dev;
 
index a473c182c91d0e4f981640ee285486e4a48e95d8..e4071265be76f2240915879f13130fa30e592a39 100644
@@ -2804,7 +2804,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
                                priv->tx_path_in_lpi_mode = true;
                        if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
                                priv->tx_path_in_lpi_mode = false;
-                       if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+                       if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
                                priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
                                                        priv->rx_tail_addr,
                                                        STMMAC_CHAN0);
index e6bb0ecb12c74cb88204635d3ec2712816597a44..53190894f17a04d00cd8061ad95dcd2776ffcda6 100644
@@ -2505,8 +2505,6 @@ static int cpsw_probe(struct platform_device *pdev)
 clean_ale_ret:
        cpsw_ale_destroy(priv->ale);
 clean_dma_ret:
-       cpdma_chan_destroy(priv->txch);
-       cpdma_chan_destroy(priv->rxch);
        cpdma_ctlr_destroy(priv->dma);
 clean_runtime_disable_ret:
        pm_runtime_disable(&pdev->dev);
@@ -2534,8 +2532,6 @@ static int cpsw_remove(struct platform_device *pdev)
        unregister_netdev(ndev);
 
        cpsw_ale_destroy(priv->ale);
-       cpdma_chan_destroy(priv->txch);
-       cpdma_chan_destroy(priv->rxch);
        cpdma_ctlr_destroy(priv->dma);
        pm_runtime_disable(&pdev->dev);
        device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
index 0a15acc075b3abfac4ce3ac5eed3f1bbf1f0f76e..11213a38c7952be27cd032933881df9124ed5ac8 100644
@@ -462,7 +462,7 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
        if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
                struct mpipe_data *md = &mpipe_data[instance];
                struct skb_shared_hwtstamps shhwtstamps;
-               struct timespec ts;
+               struct timespec64 ts;
 
                shtx->tx_flags |= SKBTX_IN_PROGRESS;
                gxio_mpipe_get_timestamp(&md->context, &ts);
@@ -886,9 +886,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
 /* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
 static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
 {
-       struct timespec ts;
+       struct timespec64 ts;
 
-       getnstimeofday(&ts);
+       ktime_get_ts64(&ts);
        gxio_mpipe_set_timestamp(&md->context, &ts);
 
        mutex_init(&md->ptp_lock);
index b0be0234abf6a3bc8a1060434a2b5c6af88112f5..a957a1c7e5babd91b5fa824afa266956a7e8d4fc 100644
@@ -17,4 +17,4 @@ skfp-objs :=  skfddi.o    hwmtm.o    fplustm.o  smt.o      cfm.o     \
 #   projects. To keep the source common for all those drivers (and
 #   thus simplify fixes to it), please do not clean it up!
 
-ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
+ccflags-y := -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
index cadefe4fdaa2aa71cfab4dc8dc0dd4b326f5745b..9b3dc3c61e00b5839fbd6c98ea6b3383b753df84 100644
@@ -958,8 +958,8 @@ tx_error:
                dev->stats.collisions++;
        else if (err == -ENETUNREACH)
                dev->stats.tx_carrier_errors++;
-       else
-               dev->stats.tx_errors++;
+
+       dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
 
@@ -1048,8 +1048,8 @@ tx_error:
                dev->stats.collisions++;
        else if (err == -ENETUNREACH)
                dev->stats.tx_carrier_errors++;
-       else
-               dev->stats.tx_errors++;
+
+       dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
 #endif
@@ -1072,12 +1072,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
 {
+       struct geneve_dev *geneve = netdev_priv(dev);
        /* The max_mtu calculation does not take account of GENEVE
         * options, to avoid excluding potentially valid
         * configurations.
         */
-       int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
-               - dev->hard_header_len;
+       int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
+
+       if (geneve->remote.sa.sa_family == AF_INET6)
+               max_mtu -= sizeof(struct ipv6hdr);
+       else
+               max_mtu -= sizeof(struct iphdr);
 
        if (new_mtu < 68)
                return -EINVAL;
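
A worked example of the corrected bound, assuming the usual header sizes
(GENEVE_BASE_HLEN = 8-byte UDP header + 8-byte fixed Geneve header = 16, Ethernet
hard_header_len = 14, IPv4 header 20 bytes, IPv6 header 40 bytes):

    /*   IPv4 underlay:  65535 - 16 - 14 - 20 = 65485
     *   IPv6 underlay:  65535 - 16 - 14 - 40 = 65465
     *
     * The old code always subtracted sizeof(struct iphdr), overstating
     * the limit by 20 bytes for IPv6-remote tunnels. */
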
@@ -1508,6 +1513,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
 {
        struct nlattr *tb[IFLA_MAX + 1];
        struct net_device *dev;
+       LIST_HEAD(list_kill);
        int err;
 
        memset(tb, 0, sizeof(tb));
@@ -1519,8 +1525,10 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
        err = geneve_configure(net, dev, &geneve_remote_unspec,
                               0, 0, 0, 0, htons(dst_port), true,
                               GENEVE_F_UDP_ZERO_CSUM6_RX);
-       if (err)
-               goto err;
+       if (err) {
+               free_netdev(dev);
+               return ERR_PTR(err);
+       }
 
        /* openvswitch users expect packet sizes to be unrestricted,
         * so set the largest MTU we can.
@@ -1529,10 +1537,15 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
        if (err)
                goto err;
 
+       err = rtnl_configure_link(dev, NULL);
+       if (err < 0)
+               goto err;
+
        return dev;
 
  err:
-       free_netdev(dev);
+       geneve_dellink(dev, &list_kill);
+       unregister_netdevice_many(&list_kill);
        return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
index 47ee2c840b55e730a8913b9736e8fdef073574dc..8bcd78f9496638e30c313abb74d004729d0dae16 100644
@@ -605,12 +605,41 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
        dev_put(dev);
 }
 
+static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
+                                            unsigned char **iv,
+                                            struct scatterlist **sg)
+{
+       size_t size, iv_offset, sg_offset;
+       struct aead_request *req;
+       void *tmp;
+
+       size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
+       iv_offset = size;
+       size += GCM_AES_IV_LEN;
+
+       size = ALIGN(size, __alignof__(struct scatterlist));
+       sg_offset = size;
+       size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+
+       tmp = kmalloc(size, GFP_ATOMIC);
+       if (!tmp)
+               return NULL;
+
+       *iv = (unsigned char *)(tmp + iv_offset);
+       *sg = (struct scatterlist *)(tmp + sg_offset);
+       req = tmp;
+
+       aead_request_set_tfm(req, tfm);
+
+       return req;
+}
+
 static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
                                      struct net_device *dev)
 {
        int ret;
-       struct scatterlist sg[MAX_SKB_FRAGS + 1];
-       unsigned char iv[GCM_AES_IV_LEN];
+       struct scatterlist *sg;
+       unsigned char *iv;
        struct ethhdr *eth;
        struct macsec_eth_header *hh;
        size_t unprotected_len;
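
macsec_alloc_req() replaces two on-stack arrays with a single heap allocation that
carries the AEAD request, the IV and the scatterlist back to back, aligning the
scatterlist to its natural alignment. A standalone sketch of the layout arithmetic,
with a dummy scatterlist type and the sizes as placeholders:

    #include <stdint.h>
    #include <stdlib.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    struct scatterlist { uintptr_t page_link; unsigned int offset, length; };

    /* One malloc for three objects: a request header of req_size bytes,
     * a 12-byte IV (GCM_AES_IV_LEN), and n scatterlist entries aligned
     * to the scatterlist's alignment, mirroring macsec_alloc_req(). */
    static void *alloc_packed(size_t req_size, size_t n,
                              unsigned char **iv, struct scatterlist **sg)
    {
            size_t size = req_size;
            size_t iv_off = size;
            size_t sg_off;
            void *tmp;

            size += 12;
            size = ALIGN_UP(size, _Alignof(struct scatterlist));
            sg_off = size;
            size += sizeof(struct scatterlist) * n;

            tmp = malloc(size);
            if (!tmp)
                    return NULL;
            *iv = (unsigned char *)tmp + iv_off;
            *sg = (struct scatterlist *)((char *)tmp + sg_off);
            return tmp;             /* the request lives at the base */
    }
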
@@ -668,8 +697,6 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
        macsec_fill_sectag(hh, secy, pn);
        macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
 
-       macsec_fill_iv(iv, secy->sci, pn);
-
        skb_put(skb, secy->icv_len);
 
        if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
@@ -684,13 +711,15 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
                return ERR_PTR(-EINVAL);
        }
 
-       req = aead_request_alloc(tx_sa->key.tfm, GFP_ATOMIC);
+       req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
        if (!req) {
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
                return ERR_PTR(-ENOMEM);
        }
 
+       macsec_fill_iv(iv, secy->sci, pn);
+
        sg_init_table(sg, MAX_SKB_FRAGS + 1);
        skb_to_sgvec(skb, sg, 0, skb->len);
 
@@ -861,7 +890,6 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
 out:
        macsec_rxsa_put(rx_sa);
        dev_put(dev);
-       return;
 }
 
 static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
@@ -871,8 +899,8 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
                                      struct macsec_secy *secy)
 {
        int ret;
-       struct scatterlist sg[MAX_SKB_FRAGS + 1];
-       unsigned char iv[GCM_AES_IV_LEN];
+       struct scatterlist *sg;
+       unsigned char *iv;
        struct aead_request *req;
        struct macsec_eth_header *hdr;
        u16 icv_len = secy->icv_len;
@@ -882,7 +910,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
+       req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
        if (!req) {
                kfree_skb(skb);
                return ERR_PTR(-ENOMEM);
@@ -1234,7 +1262,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
        struct crypto_aead *tfm;
        int ret;
 
-       tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        if (!tfm || IS_ERR(tfm))
                return NULL;
 
@@ -2612,6 +2640,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
                u64_stats_update_begin(&secy_stats->syncp);
                secy_stats->stats.OutPktsUntagged++;
                u64_stats_update_end(&secy_stats->syncp);
+               skb->dev = macsec->real_dev;
                len = skb->len;
                ret = dev_queue_xmit(skb);
                count_tx(dev, ret, len);
@@ -3361,6 +3390,7 @@ static void __exit macsec_exit(void)
        genl_unregister_family(&macsec_fam);
        rtnl_link_unregister(&macsec_link_ops);
        unregister_netdevice_notifier(&macsec_notifier);
+       rcu_barrier();
 }
 
 module_init(macsec_init);
index 2afa61b51d411d45bd0b673e6cc93125c2eb9927..91177a4a32ad21c1cbea044424659f5604cef7de 100644
@@ -57,6 +57,7 @@
 
 /* PHY CTRL bits */
 #define DP83867_PHYCR_FIFO_DEPTH_SHIFT         14
+#define DP83867_PHYCR_FIFO_DEPTH_MASK          (3 << 14)
 
 /* RGMIIDCTL bits */
 #define DP83867_RGMII_TX_CLK_DELAY_SHIFT       4
@@ -133,8 +134,8 @@ static int dp83867_of_init(struct phy_device *phydev)
 static int dp83867_config_init(struct phy_device *phydev)
 {
        struct dp83867_private *dp83867;
-       int ret;
-       u16 val, delay;
+       int ret, val;
+       u16 delay;
 
        if (!phydev->priv) {
                dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
@@ -151,8 +152,12 @@ static int dp83867_config_init(struct phy_device *phydev)
        }
 
        if (phy_interface_is_rgmii(phydev)) {
-               ret = phy_write(phydev, MII_DP83867_PHYCTRL,
-                       (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+               val = phy_read(phydev, MII_DP83867_PHYCTRL);
+               if (val < 0)
+                       return val;
+               val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
+               val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
+               ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
                if (ret)
                        return ret;
        }
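
The dp83867 change is a textbook read-modify-write fix: the old code wrote the FIFO
depth with a blind phy_write(), zeroing every other bit in PHYCTRL. The generic
shape of the fix, with hypothetical reg_read()/reg_write() accessors in place of
phy_read()/phy_write():

    #include <stdint.h>

    extern int reg_read(unsigned int reg);                /* hypothetical */
    extern int reg_write(unsigned int reg, uint16_t val); /* hypothetical */

    #define FIELD_SHIFT 14
    #define FIELD_MASK  (3u << FIELD_SHIFT)

    /* Update one field without clobbering its neighbours: read, clear
     * the field through its mask, OR in the new value, write back. */
    static int set_field(unsigned int reg, uint16_t field)
    {
            int val = reg_read(reg);

            if (val < 0)
                    return val;     /* propagate the read error, as the fix does */

            val &= ~FIELD_MASK;
            val |= ((uint32_t)field << FIELD_SHIFT) & FIELD_MASK;
            return reg_write(reg, (uint16_t)val);
    }
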
index 2d2e4339f0df40b5379be29b12f87587db1944a3..9ec7f735343482eb4db054daa5fb5af4cec236ba 100644
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/gpio.h>
+#include <linux/idr.h>
 
 #define MII_REGS_NUM 29
 
@@ -286,6 +287,8 @@ err_regs:
 }
 EXPORT_SYMBOL_GPL(fixed_phy_add);
 
+static DEFINE_IDA(phy_fixed_ida);
+
 static void fixed_phy_del(int phy_addr)
 {
        struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -297,14 +300,12 @@ static void fixed_phy_del(int phy_addr)
                        if (gpio_is_valid(fp->link_gpio))
                                gpio_free(fp->link_gpio);
                        kfree(fp);
+                       ida_simple_remove(&phy_fixed_ida, phy_addr);
                        return;
                }
        }
 }
 
-static int phy_fixed_addr;
-static DEFINE_SPINLOCK(phy_fixed_addr_lock);
-
 struct phy_device *fixed_phy_register(unsigned int irq,
                                      struct fixed_phy_status *status,
                                      int link_gpio,
@@ -319,17 +320,15 @@ struct phy_device *fixed_phy_register(unsigned int irq,
                return ERR_PTR(-EPROBE_DEFER);
 
        /* Get the next available PHY address, up to PHY_MAX_ADDR */
-       spin_lock(&phy_fixed_addr_lock);
-       if (phy_fixed_addr == PHY_MAX_ADDR) {
-               spin_unlock(&phy_fixed_addr_lock);
-               return ERR_PTR(-ENOSPC);
-       }
-       phy_addr = phy_fixed_addr++;
-       spin_unlock(&phy_fixed_addr_lock);
+       phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
+       if (phy_addr < 0)
+               return ERR_PTR(phy_addr);
 
        ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
-       if (ret < 0)
+       if (ret < 0) {
+               ida_simple_remove(&phy_fixed_ida, phy_addr);
                return ERR_PTR(ret);
+       }
 
        phy = get_phy_device(fmb->mii_bus, phy_addr, false);
        if (IS_ERR(phy)) {
@@ -434,6 +433,7 @@ static void __exit fixed_mdio_bus_exit(void)
                list_del(&fp->node);
                kfree(fp);
        }
+       ida_destroy(&phy_fixed_ida);
 }
 module_exit(fixed_mdio_bus_exit);
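Switching fixed_phy from a forward-only counter to an IDA means addresses released by fixed_phy_del() become allocatable again, so repeated register/unregister cycles no longer exhaust PHY_MAX_ADDR. A minimal sketch of the allocator lifecycle (demo_* names hypothetical):

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(demo_ida);

static int demo_alloc_id(void)
{
        /* lowest free id in [0, 32); returns -ENOSPC when the range is full */
        return ida_simple_get(&demo_ida, 0, 32, GFP_KERNEL);
}

static void demo_free_id(int id)
{
        ida_simple_remove(&demo_ida, id);       /* id becomes reusable */
}

static void demo_teardown(void)
{
        ida_destroy(&demo_ida);         /* free the IDA's internal memory */
}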
 
index 280e8795b46367af40d717d9b16d56f748d6f585..ec2c1eee64051919a7623abf160eacf4233b57de 100644 (file)
@@ -285,6 +285,48 @@ static int marvell_config_aneg(struct phy_device *phydev)
        return 0;
 }
 
+static int m88e1111_config_aneg(struct phy_device *phydev)
+{
+       int err;
+
+       /* The Marvell PHY has an erratum which requires
+        * that certain registers get written in order
+        * to restart autonegotiation
+        */
+       err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+       err = marvell_set_polarity(phydev, phydev->mdix);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, MII_M1111_PHY_LED_CONTROL,
+                       MII_M1111_PHY_LED_DIRECT);
+       if (err < 0)
+               return err;
+
+       err = genphy_config_aneg(phydev);
+       if (err < 0)
+               return err;
+
+       if (phydev->autoneg != AUTONEG_ENABLE) {
+               int bmcr;
+
+               /* A write to speed/duplex bits (that is performed by
+                * the genphy_config_aneg() call above) must be followed by
+                * a software reset. Otherwise, the write has no effect.
+                */
+               bmcr = phy_read(phydev, MII_BMCR);
+               if (bmcr < 0)
+                       return bmcr;
+
+               err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET);
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+
 #ifdef CONFIG_OF_MDIO
 /*
  * Set and/or override some configuration registers based on the
@@ -407,15 +449,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
        if (err < 0)
                return err;
 
-       oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
-       phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
-       phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
-       phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
-
-       err = genphy_config_aneg(phydev);
-
-       return err;
+       return genphy_config_aneg(phydev);
 }
 
 static int m88e1318_config_aneg(struct phy_device *phydev)
@@ -636,6 +670,28 @@ static int m88e1111_config_init(struct phy_device *phydev)
        return phy_write(phydev, MII_BMCR, BMCR_RESET);
 }
 
+static int m88e1121_config_init(struct phy_device *phydev)
+{
+       int err, oldpage;
+
+       oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+
+       err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+       if (err < 0)
+               return err;
+
+       /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+       err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
+                       MII_88E1121_PHY_LED_DEF);
+       if (err < 0)
+               return err;
+
+       phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+
+       /* Set marvell,reg-init configuration from device tree */
+       return marvell_config_init(phydev);
+}
+
 static int m88e1510_config_init(struct phy_device *phydev)
 {
        int err;
@@ -668,7 +724,7 @@ static int m88e1510_config_init(struct phy_device *phydev)
                        return err;
        }
 
-       return marvell_config_init(phydev);
+       return m88e1121_config_init(phydev);
 }
 
 static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -1161,7 +1217,7 @@ static struct phy_driver marvell_drivers[] = {
                .flags = PHY_HAS_INTERRUPT,
                .probe = marvell_probe,
                .config_init = &m88e1111_config_init,
-               .config_aneg = &marvell_config_aneg,
+               .config_aneg = &m88e1111_config_aneg,
                .read_status = &marvell_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
                .config_intr = &marvell_config_intr,
@@ -1196,7 +1252,7 @@ static struct phy_driver marvell_drivers[] = {
                .features = PHY_GBIT_FEATURES,
                .flags = PHY_HAS_INTERRUPT,
                .probe = marvell_probe,
-               .config_init = &marvell_config_init,
+               .config_init = &m88e1121_config_init,
                .config_aneg = &m88e1121_config_aneg,
                .read_status = &marvell_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
@@ -1215,7 +1271,7 @@ static struct phy_driver marvell_drivers[] = {
                .features = PHY_GBIT_FEATURES,
                .flags = PHY_HAS_INTERRUPT,
                .probe = marvell_probe,
-               .config_init = &marvell_config_init,
+               .config_init = &m88e1121_config_init,
                .config_aneg = &m88e1318_config_aneg,
                .read_status = &marvell_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
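m88e1121_config_init() above uses the save/switch/restore idiom for Marvell paged registers: remember the current page, select the LED page, write, then restore. A condensed sketch of that idiom; the helper is hypothetical, and the page-select register address mirrors MII_MARVELL_PHY_PAGE:

#include <linux/phy.h>

#define DEMO_PAGE_SEL 22        /* page-select register, per MII_MARVELL_PHY_PAGE */

static int demo_paged_write(struct phy_device *phydev, int page,
                            u32 reg, u16 val)
{
        int oldpage, err;

        oldpage = phy_read(phydev, DEMO_PAGE_SEL);
        if (oldpage < 0)
                return oldpage;

        err = phy_write(phydev, DEMO_PAGE_SEL, page);
        if (err < 0)
                return err;

        err = phy_write(phydev, reg, val);

        /* restore unconditionally so later accesses see the old page */
        phy_write(phydev, DEMO_PAGE_SEL, oldpage);
        return err;
}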
index 2e21e9366f76a184aa2c8d770557e6d9ff0db235..b62c4aaee40bd8986a80965a615d327e9c270293 100644 (file)
@@ -75,22 +75,13 @@ static int smsc_phy_reset(struct phy_device *phydev)
         * in all capable mode before using it.
         */
        if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) {
-               int timeout = 50000;
-
-               /* set "all capable" mode and reset the phy */
+               /* set "all capable" mode */
                rc |= MII_LAN83C185_MODE_ALL;
                phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc);
-               phy_write(phydev, MII_BMCR, BMCR_RESET);
-
-               /* wait end of reset (max 500 ms) */
-               do {
-                       udelay(10);
-                       if (timeout-- == 0)
-                               return -1;
-                       rc = phy_read(phydev, MII_BMCR);
-               } while (rc & BMCR_RESET);
        }
-       return 0;
+
+       /* reset the phy */
+       return genphy_soft_reset(phydev);
 }
 
 static int lan911x_config_init(struct phy_device *phydev)
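The smsc hunk drops an open-coded reset-and-poll loop in favor of genphy_soft_reset(). Roughly, such a soft reset writes the self-clearing BMCR_RESET bit and polls until the PHY deasserts it; the following is a sketch of that behavior under stated assumptions, not the kernel's exact implementation:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mii.h>
#include <linux/phy.h>

static int demo_soft_reset(struct phy_device *phydev)
{
        int val, timeout = 500;         /* roughly 500 ms upper bound */

        val = phy_write(phydev, MII_BMCR, BMCR_RESET);
        if (val < 0)
                return val;

        do {
                msleep(1);
                val = phy_read(phydev, MII_BMCR);
                if (val < 0)
                        return val;
        } while ((val & BMCR_RESET) && --timeout);

        return timeout ? 0 : -ETIMEDOUT;
}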
index 8dedafa1a95d0b2f8e1db526cc64770876488f46..a30ee427efab3330492c9cda8ceb65259c190423 100644 (file)
@@ -2601,8 +2601,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
        spin_lock_bh(&pn->all_channels_lock);
        list_del(&pch->list);
        spin_unlock_bh(&pn->all_channels_lock);
-       put_net(pch->chan_net);
-       pch->chan_net = NULL;
 
        pch->file.dead = 1;
        wake_up_interruptible(&pch->file.rwait);
@@ -3136,6 +3134,9 @@ ppp_disconnect_channel(struct channel *pch)
  */
 static void ppp_destroy_channel(struct channel *pch)
 {
+       put_net(pch->chan_net);
+       pch->chan_net = NULL;
+
        atomic_dec(&channel_count);
 
        if (!pch->file.dead) {
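The two ppp hunks move the put_net() from channel unregistration to final destruction, so the netns reference is held exactly as long as the channel object that stores it; dropping it earlier left pch->chan_net pointing at a namespace that could already be gone. A minimal sketch of tying a netns reference to object lifetime (demo_* names hypothetical):

#include <linux/slab.h>
#include <net/net_namespace.h>

struct demo_chan {
        struct net *net;        /* held for the object's whole lifetime */
};

static struct demo_chan *demo_chan_create(struct net *net)
{
        struct demo_chan *ch = kzalloc(sizeof(*ch), GFP_KERNEL);

        if (!ch)
                return NULL;
        ch->net = get_net(net);         /* take the reference at creation */
        return ch;
}

static void demo_chan_destroy(struct demo_chan *ch)
{
        put_net(ch->net);               /* drop it only at final teardown */
        kfree(ch);
}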
index 2ace126533cda080a60d3c03ef8e4e61aafc816f..fdee772073236a0542eabc00461957e487fa1370 100644 (file)
@@ -1203,8 +1203,10 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                goto err_dev_open;
        }
 
+       netif_addr_lock_bh(dev);
        dev_uc_sync_multiple(port_dev, dev);
        dev_mc_sync_multiple(port_dev, dev);
+       netif_addr_unlock_bh(dev);
 
        err = vlan_vids_add_by_dev(port_dev, dev);
        if (err) {
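dev_uc_sync_multiple() and dev_mc_sync_multiple() walk and modify the upper device's address lists, which are protected by its addr_list_lock; the team hunk wraps the pair in netif_addr_lock_bh(). A condensed sketch of the locking (the function name is hypothetical):

#include <linux/netdevice.h>

static void demo_sync_port_addrs(struct net_device *port_dev,
                                 struct net_device *dev)
{
        netif_addr_lock_bh(dev);        /* serialize address-list access */
        dev_uc_sync_multiple(port_dev, dev);
        dev_mc_sync_multiple(port_dev, dev);
        netif_addr_unlock_bh(dev);
}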
index 53759c315b97aeeb54ce5b41fc0e4ec73ee8fac8..877c9516e78174dc41bd1e9d3f4c506d193134a9 100644 (file)
@@ -854,6 +854,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
        if (cdc_ncm_init(dev))
                goto error2;
 
+       /* Some firmwares need a pause here or they will silently fail
+        * to set up the interface properly.  This value was decided
+        * empirically on a Sierra Wireless MC7455 running 02.08.02.00
+        * firmware.
+        */
+       usleep_range(10000, 20000);
+
        /* configure data interface */
        temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
        if (temp) {
index 3f9f6ed3eec4de5c14e43039e08d53b209e52cb4..e9654a6853818933334488c28a98dbb3073a84cf 100644 (file)
 #include <linux/mdio.h>
 #include <linux/usb/cdc.h>
 #include <linux/suspend.h>
+#include <linux/acpi.h>
 
 /* Information for net-next */
 #define NETNEXT_VERSION                "08"
 
 /* Information for net */
-#define NET_VERSION            "3"
+#define NET_VERSION            "5"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
 #define USB_TX_DMA             0xd434
 #define USB_TOLERANCE          0xd490
 #define USB_LPM_CTRL           0xd41a
+#define USB_BMU_RESET          0xd4b0
 #define USB_UPS_CTRL           0xd800
 #define USB_MISC_0             0xd81a
 #define USB_POWER_CUT          0xd80a
 #define TEST_MODE_DISABLE      0x00000001
 #define TX_SIZE_ADJUST1                0x00000100
 
+/* USB_BMU_RESET */
+#define BMU_RESET_EP_IN                0x01
+#define BMU_RESET_EP_OUT       0x02
+
 /* USB_UPS_CTRL */
 #define POWER_CUT              0x0100
 
 /* SRAM_IMPEDANCE */
 #define RX_DRIVING_MASK                0x6000
 
+/* MAC PASSTHRU */
+#define AD_MASK                        0xfee0
+#define EFUSE                  0xcfdb
+#define PASS_THRU_MASK         0x1
+
 enum rtl_register_content {
        _1000bps        = 0x10,
        _100bps         = 0x08,
@@ -619,6 +630,7 @@ struct r8152 {
                int (*eee_get)(struct r8152 *, struct ethtool_eee *);
                int (*eee_set)(struct r8152 *, struct ethtool_eee *);
                bool (*in_nway)(struct r8152 *);
+               void (*autosuspend_en)(struct r8152 *tp, bool enable);
        } rtl_ops;
 
        int intr_interval;
@@ -1030,6 +1042,65 @@ out1:
        return ret;
 }
 
+/* Devices containing RTL8153-AD can support a persistent
+ * host-system-provided MAC address.
+ * Examples of this are the Dell TB15 and Dell WD15 docks.
+ */
+static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
+{
+       acpi_status status;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *obj;
+       int ret = -EINVAL;
+       u32 ocp_data;
+       unsigned char buf[6];
+
+       /* test for -AD variant of RTL8153 */
+       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+       if ((ocp_data & AD_MASK) != 0x1000)
+               return -ENODEV;
+
+       /* test for MAC address pass-through bit */
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+       if ((ocp_data & PASS_THRU_MASK) != 1)
+               return -ENODEV;
+
+       /* returns _AUXMAC_#AABBCCDDEEFF# */
+       status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
+       obj = (union acpi_object *)buffer.pointer;
+       if (!ACPI_SUCCESS(status))
+               return -ENODEV;
+       if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
+               netif_warn(tp, probe, tp->netdev,
+                          "Invalid buffer when reading pass-thru MAC addr: "
+                          "(%d, %d)\n",
+                          obj->type, obj->string.length);
+               goto amacout;
+       }
+       if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
+           strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
+               netif_warn(tp, probe, tp->netdev,
+                          "Invalid header when reading pass-thru MAC addr\n");
+               goto amacout;
+       }
+       ret = hex2bin(buf, obj->string.pointer + 9, 6);
+       if (!(ret == 0 && is_valid_ether_addr(buf))) {
+               netif_warn(tp, probe, tp->netdev,
+                          "Invalid MAC when reading pass-thru MAC addr: "
+                          "%d, %pM\n", ret, buf);
+               ret = -EINVAL;
+               goto amacout;
+       }
+       memcpy(sa->sa_data, buf, 6);
+       ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
+       netif_info(tp, probe, tp->netdev,
+                  "Using pass-thru MAC addr %pM\n", sa->sa_data);
+
+amacout:
+       kfree(obj);
+       return ret;
+}
+
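vendor_mac_passthru_addr_read() above expects the ACPI _SB.AMAC buffer to look like "_AUXMAC_#AABBCCDDEEFF#" and converts the twelve hex digits with hex2bin(). A standalone sketch of just the parsing and validation; demo_parse_auxmac is hypothetical:

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/string.h>

static int demo_parse_auxmac(const char *s, size_t len, u8 mac[6])
{
        /* layout: "_AUXMAC_#" + 12 hex digits + '#' at offset 0x15 */
        if (len != 0x17 ||
            strncmp(s, "_AUXMAC_#", 9) != 0 || s[0x15] != '#')
                return -EINVAL;

        if (hex2bin(mac, s + 9, 6))     /* 12 hex chars -> 6 bytes */
                return -EINVAL;

        return is_valid_ether_addr(mac) ? 0 : -EADDRNOTAVAIL;
}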
 static int set_ethernet_addr(struct r8152 *tp)
 {
        struct net_device *dev = tp->netdev;
@@ -1038,8 +1109,15 @@ static int set_ethernet_addr(struct r8152 *tp)
 
        if (tp->version == RTL_VER_01)
                ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
-       else
-               ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+       else {
+               /* if this is not an RTL8153-AD, no eFuse MAC pass-thru bit
+                * is set, or the system doesn't provide a valid _SB.AMAC,
+                * this is expected to be non-zero
+                */
+               ret = vendor_mac_passthru_addr_read(tp, &sa);
+               if (ret < 0)
+                       ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+       }
 
        if (ret < 0) {
                netif_err(tp, probe, dev, "Get ether addr fail\n");
@@ -2169,7 +2247,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
 static void r8153_set_rx_early_size(struct r8152 *tp)
 {
        u32 mtu = tp->netdev->mtu;
-       u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4;
+       u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
 
        ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
 }
@@ -2290,10 +2368,6 @@ static u32 __rtl_get_wol(struct r8152 *tp)
        u32 ocp_data;
        u32 wolopts = 0;
 
-       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
-       if (!(ocp_data & LAN_WAKE_EN))
-               return 0;
-
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
        if (ocp_data & LINK_ON_WAKE_EN)
                wolopts |= WAKE_PHY;
@@ -2326,15 +2400,13 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
-       ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+       ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN);
        if (wolopts & WAKE_UCAST)
                ocp_data |= UWF_EN;
        if (wolopts & WAKE_BCAST)
                ocp_data |= BWF_EN;
        if (wolopts & WAKE_MCAST)
                ocp_data |= MWF_EN;
-       if (wolopts & WAKE_ANY)
-               ocp_data |= LAN_WAKE_EN;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
 
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
@@ -2403,9 +2475,6 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
        if (enable) {
                u32 ocp_data;
 
-               r8153_u1u2en(tp, false);
-               r8153_u2p3en(tp, false);
-
                __rtl_set_wol(tp, WAKE_ANY);
 
                ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2416,7 +2485,28 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
 
                ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
        } else {
+               u32 ocp_data;
+
                __rtl_set_wol(tp, tp->saved_wolopts);
+
+               ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+               ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+               ocp_data &= ~LINK_OFF_WAKE_EN;
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+               ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+       }
+}
+
+static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
+{
+       rtl_runtime_suspend_enable(tp, enable);
+
+       if (enable) {
+               r8153_u1u2en(tp, false);
+               r8153_u2p3en(tp, false);
+       } else {
                r8153_u2p3en(tp, true);
                r8153_u1u2en(tp, true);
        }
@@ -2456,6 +2546,17 @@ static void r8153_teredo_off(struct r8152 *tp)
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
 }
 
+static void rtl_reset_bmu(struct r8152 *tp)
+{
+       u32 ocp_data;
+
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET);
+       ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT);
+       ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+       ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT;
+       ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+}
+
 static void r8152_aldps_en(struct r8152 *tp, bool enable)
 {
        if (enable) {
@@ -2681,6 +2782,7 @@ static void r8153_first_init(struct r8152 *tp)
        r8153_hw_phy_cfg(tp);
 
        rtl8152_nic_reset(tp);
+       rtl_reset_bmu(tp);
 
        ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
        ocp_data &= ~NOW_IS_OOB;
@@ -2742,6 +2844,7 @@ static void r8153_enter_oob(struct r8152 *tp)
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
 
        rtl_disable(tp);
+       rtl_reset_bmu(tp);
 
        for (i = 0; i < 1000; i++) {
                ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2803,6 +2906,7 @@ static void rtl8153_disable(struct r8152 *tp)
 {
        r8153_aldps_en(tp, false);
        rtl_disable(tp);
+       rtl_reset_bmu(tp);
        r8153_aldps_en(tp, true);
        usb_enable_lpm(tp->udev);
 }
@@ -3382,15 +3486,11 @@ static void r8153_init(struct r8152 *tp)
        r8153_power_cut_en(tp, false);
        r8153_u1u2en(tp, true);
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
-                      PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
-                      U1U2_SPDWN_EN | L1_SPDWN_EN);
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
-                      PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
-                      TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
-                      EEE_SPDWN_EN);
+       /* MAC clock speed down */
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
 
        r8153_enable_eee(tp);
        r8153_aldps_en(tp, true);
@@ -3497,7 +3597,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
                napi_disable(&tp->napi);
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                        rtl_stop_rx(tp);
-                       rtl_runtime_suspend_enable(tp, true);
+                       tp->rtl_ops.autosuspend_en(tp, true);
                } else {
                        cancel_delayed_work_sync(&tp->schedule);
                        tp->rtl_ops.down(tp);
@@ -3523,7 +3623,7 @@ static int rtl8152_resume(struct usb_interface *intf)
 
        if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
-                       rtl_runtime_suspend_enable(tp, false);
+                       tp->rtl_ops.autosuspend_en(tp, false);
                        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
                        napi_disable(&tp->napi);
                        set_bit(WORK_ENABLE, &tp->flags);
@@ -3542,7 +3642,7 @@ static int rtl8152_resume(struct usb_interface *intf)
                usb_submit_urb(tp->intr_urb, GFP_KERNEL);
        } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                if (tp->netdev->flags & IFF_UP)
-                       rtl_runtime_suspend_enable(tp, false);
+                       tp->rtl_ops.autosuspend_en(tp, false);
                clear_bit(SELECTIVE_SUSPEND, &tp->flags);
        }
 
@@ -4122,6 +4222,7 @@ static int rtl_ops_init(struct r8152 *tp)
                ops->eee_get            = r8152_get_eee;
                ops->eee_set            = r8152_set_eee;
                ops->in_nway            = rtl8152_in_nway;
+               ops->autosuspend_en     = rtl_runtime_suspend_enable;
                break;
 
        case RTL_VER_03:
@@ -4137,6 +4238,7 @@ static int rtl_ops_init(struct r8152 *tp)
                ops->eee_get            = r8153_get_eee;
                ops->eee_set            = r8153_set_eee;
                ops->in_nway            = rtl8153_in_nway;
+               ops->autosuspend_en     = rtl8153_runtime_enable;
                break;
 
        default:
@@ -4323,3 +4425,4 @@ module_usb_driver(rtl8152_driver);
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
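Because RTL8152 and RTL8153 need different runtime-suspend sequences (the 8153 also toggles its U1/U2 and U2P3 power states), suspend and resume are now routed through a per-chip autosuspend_en function pointer installed by rtl_ops_init(). A minimal sketch of that dispatch pattern with hypothetical demo_* names:

#include <linux/types.h>

struct demo_dev;

struct demo_ops {
        void (*autosuspend_en)(struct demo_dev *dev, bool enable);
};

struct demo_dev {
        struct demo_ops ops;
        int version;
};

static void demo_v1_autosuspend(struct demo_dev *dev, bool enable) { /* ... */ }
static void demo_v2_autosuspend(struct demo_dev *dev, bool enable) { /* ... */ }

/* bind the variant-specific handler once, at probe time */
static void demo_ops_init(struct demo_dev *dev)
{
        dev->ops.autosuspend_en = dev->version == 2 ? demo_v2_autosuspend
                                                    : demo_v1_autosuspend;
}

static void demo_suspend(struct demo_dev *dev)
{
        dev->ops.autosuspend_en(dev, true);     /* no per-chip if/else here */
}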
index 61ba464049374593316e1c0b869774861ee79b5e..6086a0163249c5e05d8cdf92b1b834c3da4a71a7 100644 (file)
@@ -395,8 +395,11 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
        dev->hard_mtu = net->mtu + net->hard_header_len;
        if (dev->rx_urb_size == old_hard_mtu) {
                dev->rx_urb_size = dev->hard_mtu;
-               if (dev->rx_urb_size > old_rx_urb_size)
+               if (dev->rx_urb_size > old_rx_urb_size) {
+                       usbnet_pause_rx(dev);
                        usbnet_unlink_rx_urbs(dev);
+                       usbnet_resume_rx(dev);
+               }
        }
 
        /* max qlen depend on hard_mtu and rx_urb_size */
@@ -1508,8 +1511,9 @@ static void usbnet_bh (unsigned long param)
        } else if (netif_running (dev->net) &&
                   netif_device_present (dev->net) &&
                   netif_carrier_ok(dev->net) &&
-                  !timer_pending (&dev->delay) &&
-                  !test_bit (EVENT_RX_HALT, &dev->flags)) {
+                  !timer_pending(&dev->delay) &&
+                  !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
+                  !test_bit(EVENT_RX_HALT, &dev->flags)) {
                int     temp = dev->rxq.qlen;
 
                if (temp < RX_QLEN(dev)) {
index dff08842f26d034dc21a7a73b8a1c9b0cb72236a..8bd8c7e1ee8724c56c670850c9ecc20590a68707 100644 (file)
@@ -304,7 +304,7 @@ static int vrf_rt6_create(struct net_device *dev)
        dst_hold(&rt6->dst);
 
        rt6->rt6i_table = rt6i_table;
-       rt6->dst.output = vrf_output6;
+       rt6->dst.output = vrf_output6;
        rcu_assign_pointer(vrf->rt6, rt6);
 
        rc = 0;
@@ -403,7 +403,7 @@ static int vrf_rtable_create(struct net_device *dev)
        if (!rth)
                return -ENOMEM;
 
-       rth->dst.output = vrf_output;
+       rth->dst.output = vrf_output;
        rth->rt_table_id = vrf->tb_id;
 
        rcu_assign_pointer(vrf->rth, rth);
index f999db2f97b4bb1b9254affc58f187b35da1c3f1..b3b9db68f758a2062a2f6bde2bfb9805d271cc6c 100644 (file)
@@ -2952,30 +2952,6 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
        return 0;
 }
 
-struct net_device *vxlan_dev_create(struct net *net, const char *name,
-                                   u8 name_assign_type, struct vxlan_config *conf)
-{
-       struct nlattr *tb[IFLA_MAX+1];
-       struct net_device *dev;
-       int err;
-
-       memset(&tb, 0, sizeof(tb));
-
-       dev = rtnl_create_link(net, name, name_assign_type,
-                              &vxlan_link_ops, tb);
-       if (IS_ERR(dev))
-               return dev;
-
-       err = vxlan_dev_configure(net, dev, conf);
-       if (err < 0) {
-               free_netdev(dev);
-               return ERR_PTR(err);
-       }
-
-       return dev;
-}
-EXPORT_SYMBOL_GPL(vxlan_dev_create);
-
 static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
 {
@@ -3268,6 +3244,40 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
        .get_link_net   = vxlan_get_link_net,
 };
 
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+                                   u8 name_assign_type,
+                                   struct vxlan_config *conf)
+{
+       struct nlattr *tb[IFLA_MAX + 1];
+       struct net_device *dev;
+       int err;
+
+       memset(&tb, 0, sizeof(tb));
+
+       dev = rtnl_create_link(net, name, name_assign_type,
+                              &vxlan_link_ops, tb);
+       if (IS_ERR(dev))
+               return dev;
+
+       err = vxlan_dev_configure(net, dev, conf);
+       if (err < 0) {
+               free_netdev(dev);
+               return ERR_PTR(err);
+       }
+
+       err = rtnl_configure_link(dev, NULL);
+       if (err < 0) {
+               LIST_HEAD(list_kill);
+
+               vxlan_dellink(dev, &list_kill);
+               unregister_netdevice_many(&list_kill);
+               return ERR_PTR(err);
+       }
+
+       return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
+
 static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
                                             struct net_device *dev)
 {
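vxlan_dev_create() is moved below vxlan_link_ops so it can finish registration with rtnl_configure_link(); once the device is registered, failure must unwind through dellink plus unregister_netdevice_many() rather than a bare free_netdev(). A sketch of that create/configure/unwind flow; demo_* names are hypothetical, demo_setup_fn is assumed to register the netdevice, and the caller is assumed to hold rtnl_lock():

#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/rtnetlink.h>

static struct net_device *demo_dev_create(struct net *net, const char *name,
                                          struct rtnl_link_ops *ops,
                                          int (*demo_setup_fn)(struct net_device *))
{
        struct nlattr *tb[IFLA_MAX + 1] = {};
        struct net_device *dev;
        int err;

        dev = rtnl_create_link(net, name, NET_NAME_UNKNOWN, ops, tb);
        if (IS_ERR(dev))
                return dev;

        err = demo_setup_fn(dev);       /* registers the netdevice */
        if (err < 0) {
                free_netdev(dev);       /* not yet registered: just free */
                return ERR_PTR(err);
        }

        err = rtnl_configure_link(dev, NULL);
        if (err < 0) {
                LIST_HEAD(list_kill);

                ops->dellink(dev, &list_kill);  /* registered: full unwind */
                unregister_netdevice_many(&list_kill);
                return ERR_PTR(err);
        }
        return dev;
}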
index 49af62428c8864571593a98c0682f97170cfecd2..a92a0ba829f5c19c8237b539be0b9db184efe31b 100644 (file)
@@ -1083,7 +1083,7 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
                        }
 
                        ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
-                                       ar->running_fw->fw_file.fw_features,
+                                       fw_file->fw_features,
                                        sizeof(fw_file->fw_features));
                        break;
                case ATH10K_FW_IE_FW_IMAGE:
index cc979a4faeb02621b230842dc8cef71a55783bdf..813cdd2621a168ff5696b723ca924f874b2a3a57 100644 (file)
@@ -1904,7 +1904,6 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
                        return;
                }
        }
-       ath10k_htt_rx_msdu_buff_replenish(htt);
 }
 
 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
index 6dd1d26b357f5dc8076c72ef3f2c05f3594ff2ce..4040f9413e86e8d06df8e94e31759cd1c8096c90 100644 (file)
@@ -679,10 +679,10 @@ static int ath10k_peer_create(struct ath10k *ar,
 
        peer = ath10k_peer_find(ar, vdev_id, addr);
        if (!peer) {
+               spin_unlock_bh(&ar->data_lock);
                ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
                            addr, vdev_id);
                ath10k_wmi_peer_delete(ar, vdev_id, addr);
-               spin_unlock_bh(&ar->data_lock);
                return -ENOENT;
        }
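ath10k_peer_create() now releases ar->data_lock before logging and sending the WMI peer-delete: ath10k_wmi_peer_delete() can sleep, which is not allowed while holding a BH-disabled spinlock. A minimal sketch of unlocking before a sleeping error path (demo_* names hypothetical):

#include <linux/errno.h>
#include <linux/spinlock.h>

static int demo_lookup_or_fail(spinlock_t *lock, void *obj)
{
        spin_lock_bh(lock);
        if (!obj) {
                spin_unlock_bh(lock);   /* unlock first ... */
                /* ... then the sleeping cleanup, e.g. a firmware command */
                return -ENOENT;
        }
        /* ... use obj under the lock ... */
        spin_unlock_bh(lock);
        return 0;
}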
 
index 9272ca90632b983b5eec1a57612ac29307f15973..80ff69f9922977e6810e34efa62641ea72316318 100644 (file)
@@ -1122,12 +1122,12 @@ enum {
 #define AR9300_NUM_GPIO                          16
 #define AR9330_NUM_GPIO                                 16
 #define AR9340_NUM_GPIO                                 23
-#define AR9462_NUM_GPIO                                 10
+#define AR9462_NUM_GPIO                                 14
 #define AR9485_NUM_GPIO                                 12
 #define AR9531_NUM_GPIO                                 18
 #define AR9550_NUM_GPIO                                 24
 #define AR9561_NUM_GPIO                                 23
-#define AR9565_NUM_GPIO                                 12
+#define AR9565_NUM_GPIO                                 14
 #define AR9580_NUM_GPIO                                 16
 #define AR7010_NUM_GPIO                          16
 
@@ -1139,12 +1139,12 @@ enum {
 #define AR9300_GPIO_MASK                        0x0000F4FF
 #define AR9330_GPIO_MASK                        0x0000F4FF
 #define AR9340_GPIO_MASK                        0x0000000F
-#define AR9462_GPIO_MASK                        0x000003FF
+#define AR9462_GPIO_MASK                        0x00003FFF
 #define AR9485_GPIO_MASK                        0x00000FFF
 #define AR9531_GPIO_MASK                        0x0000000F
 #define AR9550_GPIO_MASK                        0x0000000F
 #define AR9561_GPIO_MASK                        0x0000000F
-#define AR9565_GPIO_MASK                        0x00000FFF
+#define AR9565_GPIO_MASK                        0x00003FFF
 #define AR9580_GPIO_MASK                        0x0000F4FF
 #define AR7010_GPIO_MASK                        0x0000FFFF
 
index e5f267b21316ca413f41ed0741a9144c4e7d0f17..18a8474b5760ca6ce6c5636a0e39770232ae818a 100644 (file)
@@ -3851,8 +3851,8 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
        if (idx != 0)
                return -ENOENT;
 
-       if (fw_has_capa(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+       if (!fw_has_capa(&mvm->fw->ucode_capa,
+                        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
                return -ENOENT;
 
        mutex_lock(&mvm->mutex);
@@ -3898,8 +3898,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
-       if (fw_has_capa(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+       if (!fw_has_capa(&mvm->fw->ucode_capa,
+                        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
                return;
 
        /* if beacon filtering isn't on mac80211 does it anyway */
index ac2c5718e454d732e0ac3ed42a716e07ec8df402..2c61516d06fff52318d972feb0defbab41654d62 100644 (file)
@@ -581,7 +581,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
                            struct iwl_rx_mpdu_desc *desc)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_mvm_sta *mvm_sta;
        struct iwl_mvm_baid_data *baid_data;
        struct iwl_mvm_reorder_buffer *buffer;
        struct sk_buff *tail;
@@ -604,6 +604,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
        if (WARN_ON(IS_ERR_OR_NULL(sta)))
                return false;
 
+       mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
        /* not a data packet */
        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            is_multicast_ether_addr(hdr->addr1))
index 6f609dd5c2220dc3b038e4d74b6407cee27a990e..e78fc567ff7d83b26161fd709092493107107cf9 100644 (file)
@@ -1222,7 +1222,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
        return -EIO;
 }
 
-#define SCAN_TIMEOUT (16 * HZ)
+#define SCAN_TIMEOUT (20 * HZ)
 
 void iwl_mvm_scan_timeout(unsigned long data)
 {
index fea4d3437e2f29543a2322af44d7892817125fe2..b23ab4a4504f7f140877ff734682b92607e276b6 100644 (file)
@@ -1852,12 +1852,18 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
            mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
                u8 sta_id = mvmvif->ap_sta_id;
 
+               sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
+                                           lockdep_is_held(&mvm->mutex));
+
                /*
                 * It is possible that the 'sta' parameter is NULL,
                 * for example when a GTK is removed - the sta_id will then
                 * be the AP ID, and no station was passed by mac80211.
                 */
-               return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+               if (IS_ERR_OR_NULL(sta))
+                       return NULL;
+
+               return iwl_mvm_sta_from_mac80211(sta);
        }
 
        return NULL;
@@ -1955,6 +1961,14 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
                struct ieee80211_key_seq seq;
                const u8 *pn;
 
+               switch (keyconf->cipher) {
+               case WLAN_CIPHER_SUITE_AES_CMAC:
+                       igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
                memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
                pn = seq.aes_cmac.pn;
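iwl_mvm_get_key_sta() above resolves the station pointer with rcu_dereference_check(), which is legal either inside an RCU read-side critical section or with mvm->mutex held, and it now rejects NULL or error-tagged entries before use. A sketch of that dual-protection pattern (demo_* names hypothetical):

#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct demo_sta;

static DEFINE_MUTEX(demo_mutex);
static struct demo_sta __rcu *demo_table[8];

/* valid under rcu_read_lock() *or* demo_mutex, per the check below */
static struct demo_sta *demo_get_sta(unsigned int id)
{
        struct demo_sta *sta;

        sta = rcu_dereference_check(demo_table[id],
                                    lockdep_is_held(&demo_mutex));
        if (IS_ERR_OR_NULL(sta))
                return NULL;            /* entry vanished or marked bad */
        return sta;
}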
index fe19ace0d6a01bf75ff0f6e2d9ba38b6c32b2de3..b04cf30f3959221c99a4c53cd5f12345fee27017 100644 (file)
@@ -1149,7 +1149,7 @@ static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
 
                for (i = 0; i < retry; i++) {
                        path_b_ok = rtl8192eu_rx_iqk_path_b(priv);
-                       if (path_a_ok == 0x03) {
+                       if (path_b_ok == 0x03) {
                                val32 = rtl8xxxu_read32(priv,
                                                        REG_RX_POWER_BEFORE_IQK_B_2);
                                result[t][6] = (val32 >> 16) & 0x3ff;
index f7718ec685fadaee4e1460210b3733b5950acfc5..cea8350fbc7ec2f2e617199fd2bc8b8cb1df2fbb 100644 (file)
@@ -344,6 +344,8 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 {
        u64 checksum, offset;
+       unsigned long align;
+       enum nd_pfn_mode mode;
        struct nd_namespace_io *nsio;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        struct nd_namespace_common *ndns = nd_pfn->ndns;
@@ -386,22 +388,50 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
                return -ENXIO;
        }
 
+       align = le32_to_cpu(pfn_sb->align);
+       offset = le64_to_cpu(pfn_sb->dataoff);
+       if (align == 0)
+               align = 1UL << ilog2(offset);
+       mode = le32_to_cpu(pfn_sb->mode);
+
        if (!nd_pfn->uuid) {
-               /* from probe we allocate */
+               /*
+                * When probing a namespace via nd_pfn_probe() the uuid
+                * is NULL (see: nd_pfn_devinit()), so we initialize
+                * settings from pfn_sb
+                */
                nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
                if (!nd_pfn->uuid)
                        return -ENOMEM;
+               nd_pfn->align = align;
+               nd_pfn->mode = mode;
        } else {
-               /* from init we validate */
+               /*
+                * When probing a pfn / dax instance we validate the
+                * live settings against the pfn_sb
+                */
                if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
                        return -ENODEV;
+
+               /*
+                * If the uuid validates, but other settings mismatch
+                * return EINVAL because userspace has managed to change
+                * the configuration without specifying new
+                * identification.
+                */
+               if (nd_pfn->align != align || nd_pfn->mode != mode) {
+                       dev_err(&nd_pfn->dev,
+                                       "init failed, settings mismatch\n");
+                       dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
+                                       nd_pfn->align, align, nd_pfn->mode,
+                                       mode);
+                       return -EINVAL;
+               }
        }
 
-       if (nd_pfn->align == 0)
-               nd_pfn->align = le32_to_cpu(pfn_sb->align);
-       if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
+       if (align > nvdimm_namespace_capacity(ndns)) {
                dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
-                               nd_pfn->align, nvdimm_namespace_capacity(ndns));
+                               align, nvdimm_namespace_capacity(ndns));
                return -EINVAL;
        }
 
@@ -411,7 +441,6 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
         * namespace has changed since the pfn superblock was
         * established.
         */
-       offset = le64_to_cpu(pfn_sb->dataoff);
        nsio = to_nd_namespace_io(&ndns->dev);
        if (offset >= resource_size(&nsio->res)) {
                dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
@@ -419,10 +448,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
                return -EBUSY;
        }
 
-       if ((nd_pfn->align && !IS_ALIGNED(offset, nd_pfn->align))
+       if ((align && !IS_ALIGNED(offset, align))
                        || !IS_ALIGNED(offset, PAGE_SIZE)) {
-               dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
-                               offset);
+               dev_err(&nd_pfn->dev,
+                               "bad offset: %#llx dax disabled align: %#lx\n",
+                               offset, align);
                return -ENXIO;
        }
 
@@ -502,7 +532,6 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
        res->start += start_pad;
        res->end -= end_trunc;
 
-       nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
        if (nd_pfn->mode == PFN_MODE_RAM) {
                if (offset < SZ_8K)
                        return ERR_PTR(-EINVAL);
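For superblocks that predate the align field (align reads back as 0), nd_pfn_validate() now infers an alignment from the data offset as 1UL << ilog2(offset), the largest power of two not exceeding it, and the subsequent offset checks use that local value. A small sketch of the fallback and the alignment test (the helper is hypothetical):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/mm.h>

static int demo_check_offset(u64 offset, unsigned long align)
{
        if (!align && offset)
                align = 1UL << ilog2(offset);   /* e.g. 0x18000 -> 0x10000 */

        if ((align && !IS_ALIGNED(offset, align)) ||
            !IS_ALIGNED(offset, PAGE_SIZE))
                return -ENXIO;          /* misaligned start, refuse it */
        return 0;
}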
index 1a51584a382bf28dcf567a3fb039bc1a571cc144..d5fb55c0a9d95cdd2e6ac9cc99ca17f44b2d6ef7 100644 (file)
@@ -1394,19 +1394,22 @@ static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
        return nsa->ns_id - nsb->ns_id;
 }
 
-static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
-       struct nvme_ns *ns;
-
-       lockdep_assert_held(&ctrl->namespaces_mutex);
+       struct nvme_ns *ns, *ret = NULL;
 
+       mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
-               if (ns->ns_id == nsid)
-                       return ns;
+               if (ns->ns_id == nsid) {
+                       kref_get(&ns->kref);
+                       ret = ns;
+                       break;
+               }
                if (ns->ns_id > nsid)
                        break;
        }
-       return NULL;
+       mutex_unlock(&ctrl->namespaces_mutex);
+       return ret;
 }
 
 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
@@ -1415,8 +1418,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        struct gendisk *disk;
        int node = dev_to_node(ctrl->dev);
 
-       lockdep_assert_held(&ctrl->namespaces_mutex);
-
        ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
        if (!ns)
                return;
@@ -1457,7 +1458,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        if (nvme_revalidate_disk(ns->disk))
                goto out_free_disk;
 
-       list_add_tail_rcu(&ns->list, &ctrl->namespaces);
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_add_tail(&ns->list, &ctrl->namespaces);
+       mutex_unlock(&ctrl->namespaces_mutex);
+
        kref_get(&ctrl->kref);
        if (ns->type == NVME_NS_LIGHTNVM)
                return;
@@ -1480,8 +1484,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
-       lockdep_assert_held(&ns->ctrl->namespaces_mutex);
-
        if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
                return;
 
@@ -1494,8 +1496,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                blk_mq_abort_requeue_list(ns->queue);
                blk_cleanup_queue(ns->queue);
        }
+
+       mutex_lock(&ns->ctrl->namespaces_mutex);
        list_del_init(&ns->list);
-       synchronize_rcu();
+       mutex_unlock(&ns->ctrl->namespaces_mutex);
+
        nvme_put_ns(ns);
 }
 
@@ -1503,10 +1508,11 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
        struct nvme_ns *ns;
 
-       ns = nvme_find_ns(ctrl, nsid);
+       ns = nvme_find_get_ns(ctrl, nsid);
        if (ns) {
                if (revalidate_disk(ns->disk))
                        nvme_ns_remove(ns);
+               nvme_put_ns(ns);
        } else
                nvme_alloc_ns(ctrl, nsid);
 }
@@ -1535,9 +1541,11 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
                        nvme_validate_ns(ctrl, nsid);
 
                        while (++prev < nsid) {
-                               ns = nvme_find_ns(ctrl, prev);
-                               if (ns)
+                               ns = nvme_find_get_ns(ctrl, prev);
+                               if (ns) {
                                        nvme_ns_remove(ns);
+                                       nvme_put_ns(ns);
+                               }
                        }
                }
                nn -= j;
@@ -1552,8 +1560,6 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
        struct nvme_ns *ns, *next;
        unsigned i;
 
-       lockdep_assert_held(&ctrl->namespaces_mutex);
-
        for (i = 1; i <= nn; i++)
                nvme_validate_ns(ctrl, i);
 
@@ -1576,7 +1582,6 @@ static void nvme_scan_work(struct work_struct *work)
        if (nvme_identify_ctrl(ctrl, &id))
                return;
 
-       mutex_lock(&ctrl->namespaces_mutex);
        nn = le32_to_cpu(id->nn);
        if (ctrl->vs >= NVME_VS(1, 1) &&
            !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -1585,6 +1590,7 @@ static void nvme_scan_work(struct work_struct *work)
        }
        nvme_scan_ns_sequential(ctrl, nn);
  done:
+       mutex_lock(&ctrl->namespaces_mutex);
        list_sort(NULL, &ctrl->namespaces, ns_cmp);
        mutex_unlock(&ctrl->namespaces_mutex);
        kfree(id);
@@ -1604,6 +1610,11 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_queue_scan);
 
+/*
+ * This function iterates the namespace list unlocked to allow recovery from
+ * controller failure. It is up to the caller to ensure the namespace list is
+ * not modified by scan work while this function is executing.
+ */
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns, *next;
@@ -1617,10 +1628,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
        if (ctrl->state == NVME_CTRL_DEAD)
                nvme_kill_queues(ctrl);
 
-       mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
                nvme_ns_remove(ns);
-       mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
 
@@ -1791,11 +1800,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
-               if (!kref_get_unless_zero(&ns->kref))
-                       continue;
-
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
                /*
                 * Revalidating a dead namespace sets capacity to 0. This will
                 * end buffered writers dirtying pages that can't be synced.
@@ -1806,10 +1812,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
                blk_set_queue_dying(ns->queue);
                blk_mq_abort_requeue_list(ns->queue);
                blk_mq_start_stopped_hw_queues(ns->queue, true);
-
-               nvme_put_ns(ns);
        }
-       rcu_read_unlock();
+       mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
 
@@ -1817,8 +1821,8 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
                spin_lock_irq(ns->queue->queue_lock);
                queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
                spin_unlock_irq(ns->queue->queue_lock);
@@ -1826,7 +1830,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
                blk_mq_cancel_requeue_work(ns->queue);
                blk_mq_stop_hw_queues(ns->queue);
        }
-       rcu_read_unlock();
+       mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_queues);
 
@@ -1834,13 +1838,13 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
                queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
                blk_mq_start_stopped_hw_queues(ns->queue, true);
                blk_mq_kick_requeue_list(ns->queue);
        }
-       rcu_read_unlock();
+       mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);
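nvme_find_get_ns() replaces the lockdep-asserted lookup: it takes namespaces_mutex itself and elevates the namespace's kref before returning, so the caller holds a stable reference once the lock is dropped and later pairs it with nvme_put_ns(). A sketch of the find-and-get idiom (demo_* names hypothetical):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct demo_ns {
        struct list_head list;
        struct kref kref;
        unsigned int id;
};

/* Find-and-get under the list lock; the caller drops the reference
 * later with kref_put().
 */
static struct demo_ns *demo_find_get(struct mutex *lock,
                                     struct list_head *head, unsigned int id)
{
        struct demo_ns *ns, *ret = NULL;

        mutex_lock(lock);
        list_for_each_entry(ns, head, list) {
                if (ns->id == id) {
                        kref_get(&ns->kref);    /* pin before unlocking */
                        ret = ns;
                        break;
                }
        }
        mutex_unlock(lock);
        return ret;
}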
 
index 95ab6b2a0de5e1bc8dda4bdd640d2ff725f436ae..58dff80e9386d67d43ac5a1b9f67ba55f7c49788 100644 (file)
@@ -109,8 +109,8 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev)
        }
 
        usb2->phy = devm_phy_create(dev, NULL, &ops);
-       if (IS_ERR(dev))
-               return PTR_ERR(dev);
+       if (IS_ERR(usb2->phy))
+               return PTR_ERR(usb2->phy);
 
        phy_set_drvdata(usb2->phy, usb2);
        platform_set_drvdata(pdev, usb2);
index 3acd2a1808dfbf9ccfa5d58467b6defdb793c2bb..213e2e15339c44dff742b2394455d7138e0d524e 100644 (file)
@@ -1143,7 +1143,8 @@ static int miphy28lp_probe_resets(struct device_node *node,
        struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
        int err;
 
-       miphy_phy->miphy_rst = of_reset_control_get(node, "miphy-sw-rst");
+       miphy_phy->miphy_rst =
+               of_reset_control_get_shared(node, "miphy-sw-rst");
 
        if (IS_ERR(miphy_phy->miphy_rst)) {
                dev_err(miphy_dev->dev,
index 76bb88f0700a72b8d06480daf4d8e2aa8b0b51de..4be3f5dbbc9f1d8b9dd416e8b930cb24b6ee3919 100644 (file)
@@ -144,12 +144,6 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
        extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
 }
 
-static bool rcar_gen3_check_vbus(struct rcar_gen3_chan *ch)
-{
-       return !!(readl(ch->base + USB2_ADPCTRL) &
-                 USB2_ADPCTRL_OTGSESSVLD);
-}
-
 static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
 {
        return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
@@ -157,13 +151,7 @@ static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
 
 static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch)
 {
-       bool is_host = true;
-
-       /* B-device? */
-       if (rcar_gen3_check_id(ch) && rcar_gen3_check_vbus(ch))
-               is_host = false;
-
-       if (is_host)
+       if (!rcar_gen3_check_id(ch))
                rcar_gen3_init_for_host(ch);
        else
                rcar_gen3_init_for_peri(ch);
index 793ecb6d87bcaa2a56a914b2745bea6453929637..8b267a746576f03be24adf5dec028d45289e31ea 100644 (file)
@@ -90,7 +90,7 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
                return -ENODEV;
 
        dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
-       if (IS_ERR(dp))
+       if (!dp)
                return -ENOMEM;
 
        dp->dev = dev;
index 1d5ae5f8ef694cf9ca389dd3a0742bec189a6955..b1f44ab669fb5cef099a54daf07eeb26f76fc770 100644 (file)
@@ -105,13 +105,13 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
        phy_dev->dev = dev;
        dev_set_drvdata(dev, phy_dev);
 
-       phy_dev->rstc = devm_reset_control_get(dev, "global");
+       phy_dev->rstc = devm_reset_control_get_shared(dev, "global");
        if (IS_ERR(phy_dev->rstc)) {
                dev_err(dev, "failed to ctrl picoPHY reset\n");
                return PTR_ERR(phy_dev->rstc);
        }
 
-       phy_dev->rstport = devm_reset_control_get(dev, "port");
+       phy_dev->rstport = devm_reset_control_get_exclusive(dev, "port");
        if (IS_ERR(phy_dev->rstport)) {
                dev_err(dev, "failed to ctrl picoPHY reset\n");
                return PTR_ERR(phy_dev->rstport);
index bae54f7a1f4877b88297858783b0b864783ec1fe..de3101fbbf40e1f3c77e7a1f22c6929c2db8fb01 100644 (file)
@@ -175,7 +175,7 @@ static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
 {
        struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
        u32 temp, usbc_bit = BIT(phy->index * 2);
-       void *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
+       void __iomem *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
        int i;
 
        mutex_lock(&phy_data->mutex);
@@ -514,9 +514,9 @@ static int sun4i_usb_phy_remove(struct platform_device *pdev)
 
        if (data->vbus_power_nb_registered)
                power_supply_unreg_notifier(&data->vbus_power_nb);
-       if (data->id_det_irq >= 0)
+       if (data->id_det_irq > 0)
                devm_free_irq(dev, data->id_det_irq, data);
-       if (data->vbus_det_irq >= 0)
+       if (data->vbus_det_irq > 0)
                devm_free_irq(dev, data->vbus_det_irq, data);
 
        cancel_delayed_work_sync(&data->detect);
@@ -645,11 +645,11 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
 
        data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
        data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
-       if ((data->id_det_gpio && data->id_det_irq < 0) ||
-           (data->vbus_det_gpio && data->vbus_det_irq < 0))
+       if ((data->id_det_gpio && data->id_det_irq <= 0) ||
+           (data->vbus_det_gpio && data->vbus_det_irq <= 0))
                data->phy0_poll = true;
 
-       if (data->id_det_irq >= 0) {
+       if (data->id_det_irq > 0) {
                ret = devm_request_irq(dev, data->id_det_irq,
                                sun4i_usb_phy0_id_vbus_det_irq,
                                IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
@@ -660,7 +660,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
                }
        }
 
-       if (data->vbus_det_irq >= 0) {
+       if (data->vbus_det_irq > 0) {
                ret = devm_request_irq(dev, data->vbus_det_irq,
                                sun4i_usb_phy0_id_vbus_det_irq,
                                IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
index e4bc1151e04f3c2d7ed0eac8b659ae27244432ba..42a5c1dddfefaf7414eca1809ef7d1bf75a11628 100644 (file)
@@ -23,7 +23,7 @@ obj-$(CONFIG_PINCTRL_PISTACHIO)       += pinctrl-pistachio.o
 obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
 obj-$(CONFIG_PINCTRL_SINGLE)   += pinctrl-single.o
 obj-$(CONFIG_PINCTRL_SIRF)     += sirf/
-obj-$(CONFIG_PINCTRL_TEGRA)    += tegra/
+obj-$(CONFIG_ARCH_TEGRA)       += tegra/
 obj-$(CONFIG_PINCTRL_TZ1090)   += pinctrl-tz1090.o
 obj-$(CONFIG_PINCTRL_TZ1090_PDC)       += pinctrl-tz1090-pdc.o
 obj-$(CONFIG_PINCTRL_U300)     += pinctrl-u300.o
index 47ccfcc8a647ccacdd0880ab770dc2c102d2ba9c..eccb47480e1db12d0212ea54c47b94d4b79d5cc8 100644 (file)
@@ -209,9 +209,9 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
                pin_reg = &info->pin_regs[pin_id];
 
                if (pin_reg->mux_reg == -1) {
-                       dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
+                       dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n",
                                info->pins[pin_id].name);
-                       return -EINVAL;
+                       continue;
                }
 
                if (info->flags & SHARE_MUX_CONF_REG) {
index 677a811b3a6ffcb604d8a6c4cb836ec8dd81575e..7abfd42e8ffdf86953764ad2fa2c1c9bf70fcc57 100644 (file)
@@ -401,9 +401,9 @@ static const struct byt_simple_func_mux byt_score_sata_mux[] = {
 static const unsigned int byt_score_plt_clk0_pins[] = { 96 };
 static const unsigned int byt_score_plt_clk1_pins[] = { 97 };
 static const unsigned int byt_score_plt_clk2_pins[] = { 98 };
-static const unsigned int byt_score_plt_clk4_pins[] = { 99 };
-static const unsigned int byt_score_plt_clk5_pins[] = { 100 };
-static const unsigned int byt_score_plt_clk3_pins[] = { 101 };
+static const unsigned int byt_score_plt_clk3_pins[] = { 99 };
+static const unsigned int byt_score_plt_clk4_pins[] = { 100 };
+static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
 static const struct byt_simple_func_mux byt_score_plt_clk_mux[] = {
        SIMPLE_FUNC("plt_clk", 1),
 };
index cf9bafa10acfb51ad42af96cf6580d6de48c1b1d..bfdf720db270d8b2c47007455406d9887ed028c6 100644 (file)
@@ -1580,6 +1580,9 @@ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc,
                else
                        mask &= ~soc_mask;
                pcs->write(mask, pcswi->reg);
+
+               /* flush posted write */
+               mask = pcs->read(pcswi->reg);
                raw_spin_unlock(&pcs->lock);
        }
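The read-back added to pcs_irq_set() flushes the posted MMIO write before the raw spinlock is released; on a posted bus the mask update could otherwise still be in flight when the next interrupt fires. A minimal sketch of the read-back idiom (the helper is hypothetical):

#include <linux/io.h>

static void demo_mask_irq(void __iomem *reg, u32 bit)
{
        u32 val = readl(reg);

        writel(val & ~bit, reg);
        (void)readl(reg);       /* flush: write reaches hw before we return */
}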
 
index a927379b67949d1603eaf20e180febbc4e1c0823..d9ea2be69cc4a3a682045c345e0ce88800717a58 100644 (file)
@@ -1,4 +1,4 @@
-obj-y                                  += pinctrl-tegra.o
+obj-$(CONFIG_PINCTRL_TEGRA)            += pinctrl-tegra.o
 obj-$(CONFIG_PINCTRL_TEGRA20)          += pinctrl-tegra20.o
 obj-$(CONFIG_PINCTRL_TEGRA30)          += pinctrl-tegra30.o
 obj-$(CONFIG_PINCTRL_TEGRA114)         += pinctrl-tegra114.o
index 6d8ee3b1587276494274617d65029534df1d1af6..8abd80dbcbed7974b4ace4265dd9dad1bca89edd 100644 (file)
@@ -151,13 +151,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
                goto exit;
        }
 
+       if (u_cmd.outsize != s_cmd->outsize ||
+           u_cmd.insize != s_cmd->insize) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
        s_cmd->command += ec->cmd_offset;
        ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
        /* Only copy data to userland if data was received. */
        if (ret < 0)
                goto exit;
 
-       if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
+       if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
                ret = -EFAULT;
 exit:
        kfree(s_cmd);
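The cros_ec ioctl copies the user command twice, a fixed-size header first and the full buffer second; re-validating outsize/insize after the second copy closes the window in which userspace could rewrite them between the fetches (a double-fetch/TOCTOU race), and the copy back out is bounded by the validated s_cmd->insize. A sketch of the guard, with demo_cmd only loosely modeled on cros_ec_command:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_cmd {
        u32 outsize;
        u32 insize;
        u8 data[];
};

/* 'full' covers the header plus payload (full >= sizeof(struct demo_cmd)) */
static int demo_fetch_cmd(struct demo_cmd *dst, size_t full,
                          const void __user *arg)
{
        struct demo_cmd hdr;

        if (copy_from_user(&hdr, arg, sizeof(hdr)))
                return -EFAULT;
        if (copy_from_user(dst, arg, full))
                return -EFAULT;
        if (hdr.outsize != dst->outsize || hdr.insize != dst->insize)
                return -EINVAL; /* sizes changed between the two fetches */
        return 0;
}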
index 4034d2d4c50795684611df474755855f7376ef6b..a66be137324c096a3a6b0f1e5fba18a3c4993f1b 100644 (file)
 /**
  * DOC: Overview
  *
- * :1:  http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx
- * :2:  http://www.renesas.com/products/mpumcu/h8s/h8s2100/h8s2113/index.jsp
- *
  * gmux is a microcontroller built into the MacBook Pro to support dual GPUs:
- * A {1}[Lattice XP2] on pre-retinas, a {2}[Renesas R4F2113] on retinas.
+ * A `Lattice XP2`_ on pre-retinas, a `Renesas R4F2113`_ on retinas.
  *
  * (The MacPro6,1 2013 also has a gmux, however it is unclear why since it has
  * dual GPUs but no built-in display.)
  *
  * gmux is connected to the LPC bus of the southbridge. Its I/O ports are
  * accessed differently depending on the microcontroller: Driver functions
- * to access a pre-retina gmux are infixed `_pio_`, those for a retina gmux
- * are infixed `_index_`.
+ * to access a pre-retina gmux are infixed ``_pio_``, those for a retina gmux
+ * are infixed ``_index_``.
+ *
+ * .. _Lattice XP2:
+ *     http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx
+ * .. _Renesas R4F2113:
+ *     http://www.renesas.com/products/mpumcu/h8s/h8s2100/h8s2113/index.jsp
  */
 
 struct apple_gmux_data {
@@ -272,15 +274,15 @@ static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
 /**
  * DOC: Backlight control
  *
- * :3:  http://www.ti.com/lit/ds/symlink/lp8543.pdf
- * :4:  http://www.ti.com/lit/ds/symlink/lp8545.pdf
- *
  * On single GPU MacBooks, the PWM signal for the backlight is generated by
  * the GPU. On dual GPU MacBook Pros by contrast, either GPU may be suspended
  * to conserve energy. Hence the PWM signal needs to be generated by a separate
  * backlight driver which is controlled by gmux. The earliest generation
- * MBP5 2008/09 uses a {3}[TI LP8543] backlight driver. All newer models
- * use a {4}[TI LP8545].
+ * MBP5 2008/09 uses a `TI LP8543`_ backlight driver. All newer models
+ * use a `TI LP8545`_.
+ *
+ * .. _TI LP8543: http://www.ti.com/lit/ds/symlink/lp8543.pdf
+ * .. _TI LP8545: http://www.ti.com/lit/ds/symlink/lp8545.pdf
  */
 
 static int gmux_get_brightness(struct backlight_device *bd)
@@ -312,28 +314,20 @@ static const struct backlight_ops gmux_bl_ops = {
 /**
  * DOC: Graphics mux
  *
- * :5:  http://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf
- * :6:  http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
- * :7:  http://www.ti.com/lit/ds/symlink/hd3ss212.pdf
- * :8:  https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf
- * :9:  http://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf
- * :10: http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf
- * :11: http://www.ti.com/lit/ds/symlink/ts3ds10224.pdf
- *
  * On pre-retinas, the LVDS outputs of both GPUs feed into gmux which muxes
  * either of them to the panel. One of the tricks gmux has up its sleeve is
  * to lengthen the blanking interval of its output during a switch to
  * synchronize it with the GPU switched to. This allows for a flicker-free
- * switch that is imperceptible by the user ({5}[US 8,687,007 B2]).
+ * switch that is imperceptible by the user (`US 8,687,007 B2`_).
  *
  * On retinas, muxing is no longer done by gmux itself, but by a separate
  * chip which is controlled by gmux. The chip is triple sourced, it is
- * either an {6}[NXP CBTL06142], {7}[TI HD3SS212] or {8}[Pericom PI3VDP12412].
+ * either an `NXP CBTL06142`_, `TI HD3SS212`_ or `Pericom PI3VDP12412`_.
  * The panel is driven with eDP instead of LVDS since the pixel clock
  * required for retina resolution exceeds LVDS' limits.
  *
  * Pre-retinas are able to switch the panel's DDC pins separately.
- * This is handled by a {9}[TI SN74LV4066A] which is controlled by gmux.
+ * This is handled by a `TI SN74LV4066A`_ which is controlled by gmux.
  * The inactive GPU can thus probe the panel's EDID without switching over
  * the entire panel. Retinas lack this functionality as the chips used for
  * eDP muxing are incapable of switching the AUX channel separately (see
@@ -344,15 +338,15 @@ static const struct backlight_ops gmux_bl_ops = {
  *
  * The external DP port is only fully switchable on the first two unibody
  * MacBook Pro generations, MBP5 2008/09 and MBP6 2010. This is done by an
- * {6}[NXP CBTL06141] which is controlled by gmux. It's the predecessor of the
+ * `NXP CBTL06141`_ which is controlled by gmux. It's the predecessor of the
  * eDP mux on retinas, the difference being support for 2.7 versus 5.4 Gbit/s.
  *
  * The following MacBook Pro generations replaced the external DP port with a
  * combined DP/Thunderbolt port and lost the ability to switch it between GPUs,
  * connecting it either to the discrete GPU or the Thunderbolt controller.
  * Oddly enough, while the full port is no longer switchable, AUX and HPD
- * are still switchable by way of an {10}[NXP CBTL03062] (on pre-retinas
- * MBP8 2011 and MBP9 2012) or two {11}[TI TS3DS10224] (on retinas) under the
+ * are still switchable by way of an `NXP CBTL03062`_ (on pre-retinas
+ * MBP8 2011 and MBP9 2012) or two `TI TS3DS10224`_ (on retinas) under the
  * control of gmux. Since the integrated GPU is missing the main link,
  * external displays appear to it as phantoms which fail to link-train.
  *
@@ -365,10 +359,19 @@ static const struct backlight_ops gmux_bl_ops = {
  * of this feature.
  *
  * gmux' initial switch state on bootup is user configurable via the EFI
- * variable `gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9` (5th byte,
+ * variable ``gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9`` (5th byte,
  * 1 = IGD, 0 = DIS). Based on this setting, the EFI firmware tells gmux to
  * switch the panel and the external DP connector and allocates a framebuffer
  * for the selected GPU.
+ *
+ * .. _US 8,687,007 B2: http://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf
+ * .. _NXP CBTL06141:   http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
+ * .. _NXP CBTL06142:   http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
+ * .. _TI HD3SS212:     http://www.ti.com/lit/ds/symlink/hd3ss212.pdf
+ * .. _Pericom PI3VDP12412: https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf
+ * .. _TI SN74LV4066A:  http://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf
+ * .. _NXP CBTL03062:   http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf
+ * .. _TI TS3DS10224:   http://www.ti.com/lit/ds/symlink/ts3ds10224.pdf
  */
 
 static void gmux_read_switch_state(struct apple_gmux_data *gmux_data)
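
The apple-gmux hunks above belong to the kernel documentation's move to
Sphinx: the ad-hoc numbered link lists (":1:", "{1}[Lattice XP2]") become
reStructuredText named hyperlink targets, and single-backquote spans become
double-backquote inline literals. An illustrative kernel-doc comment (not
from the patch) showing the two conventions together:

    /**
     * DOC: Example section
     *
     * ``_pio_`` renders as an inline literal, while `Lattice XP2`_ renders
     * as a hyperlink whose URL is supplied by a named target placed at the
     * end of the comment:
     *
     * .. _Lattice XP2:
     *     http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx
     */
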
index 456987c88baab9f4b8a0d2b7224b10d659b8bddc..b13cd074c52afc90c086bc16cc011b7adb0ac91f 100644
@@ -565,11 +565,12 @@ static int power_supply_read_temp(struct thermal_zone_device *tzd,
 
        WARN_ON(tzd == NULL);
        psy = tzd->devdata;
-       ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+       ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+       if (ret)
+               return ret;
 
        /* Convert tenths of degree Celsius to milli degree Celsius. */
-       if (!ret)
-               *temp = val.intval * 100;
+       *temp = val.intval * 100;
 
        return ret;
 }
@@ -612,10 +613,12 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
        int ret;
 
        psy = tcd->devdata;
-       ret = psy->desc->get_property(psy,
-               POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
-       if (!ret)
-               *state = val.intval;
+       ret = power_supply_get_property(psy,
+                       POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+       if (ret)
+               return ret;
+
+       *state = val.intval;
 
        return ret;
 }
@@ -628,10 +631,12 @@ static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd,
        int ret;
 
        psy = tcd->devdata;
-       ret = psy->desc->get_property(psy,
-               POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
-       if (!ret)
-               *state = val.intval;
+       ret = power_supply_get_property(psy,
+                       POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+       if (ret)
+               return ret;
+
+       *state = val.intval;
 
        return ret;
 }
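
All three power-supply hunks above convert the same shape: rather than
calling the ->get_property() method directly and nesting the success path
under "if (!ret)", they go through power_supply_get_property() (which also
rejects supplies that are mid-unregister) and return errors immediately.
A condensed sketch of the resulting early-return pattern, using a
hypothetical helper name:

    #include <linux/power_supply.h>

    /* Read a temperature in milli-degrees Celsius. */
    static int read_temp_mdegc(struct power_supply *psy, int *out)
    {
            union power_supply_propval val;
            int ret;

            ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
            if (ret)
                    return ret;      /* propagate errors at once, no nesting */

            *out = val.intval * 100; /* tenths of a degree -> milli-degrees */
            return 0;
    }
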
index d9f56730c735819010e9efff4f8499814c7fbe58..73dfae41def8a659978eec9a738448f604cb1973 100644
@@ -197,6 +197,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
 {
        struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
        struct tps65217_charger *charger;
+       struct power_supply_config cfg = {};
        int ret;
 
        dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -208,9 +209,12 @@ static int tps65217_charger_probe(struct platform_device *pdev)
        charger->tps = tps;
        charger->dev = &pdev->dev;
 
+       cfg.of_node = pdev->dev.of_node;
+       cfg.drv_data = charger;
+
        charger->ac = devm_power_supply_register(&pdev->dev,
                                                 &tps65217_charger_desc,
-                                                NULL);
+                                                &cfg);
        if (IS_ERR(charger->ac)) {
                dev_err(&pdev->dev, "failed: power supply register\n");
                return PTR_ERR(charger->ac);
index 38a8bbe7481008232e7336777850d683afb765be..83797d89c30fe88665c5d7214c391a0a571cfe88 100644
@@ -195,7 +195,7 @@ static void parport_detach(struct parport *port)
        struct pps_client_pp *device;
 
        /* FIXME: oooh, this is ugly! */
-       if (strcmp(pardev->name, KBUILD_MODNAME))
+       if (!pardev || strcmp(pardev->name, KBUILD_MODNAME))
                /* not our port */
                return;
 
index 63cd5e68c86494724eae44502bed2d7f590cbf78..3a6d0290c54c0fbd0f1c82ffcd5329c2facc56ae 100644
@@ -296,7 +296,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
                if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
                        sreg->sel = 22;
 
-               if (!sreg->sel) {
+               if (!sreg->bypass && !sreg->sel) {
                        dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
                        return -EINVAL;
                }
index 321e804aeab0a33c075caafb95ccc7bff4bf417f..a1b49a6d538f1493489f2e45092feb6dd0b96219 100644
@@ -123,6 +123,9 @@ static int max77620_regulator_set_fps_src(struct max77620_regulator *pmic,
        unsigned int val;
        int ret;
 
+       if (!rinfo)
+               return 0;
+
        switch (fps_src) {
        case MAX77620_FPS_SRC_0:
        case MAX77620_FPS_SRC_1:
@@ -171,6 +174,9 @@ static int max77620_regulator_set_fps_slots(struct max77620_regulator *pmic,
        int pd = rpdata->active_fps_pd_slot;
        int ret = 0;
 
+       if (!rinfo)
+               return 0;
+
        if (is_suspend) {
                pu = rpdata->suspend_fps_pu_slot;
                pd = rpdata->suspend_fps_pd_slot;
@@ -680,7 +686,6 @@ static struct max77620_regulator_info max77620_regs_info[MAX77620_NUM_REGS] = {
        RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 1550000, 12500, 0x22, SD1),
        RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE),
        RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE),
-       RAIL_SD(SD4, sd4, "in-sd4", SDX, 600000, 3787500, 12500, 0xFF, NONE),
 
        RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000),
        RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000),
index 526bf23dcb49a543032c6ac17da76c2e09009545..6c7fe4778793758478d9a27e3f9d11f6d4b4a1b2 100644
@@ -152,7 +152,6 @@ static const struct regulator_ops rpm_smps_ldo_ops_fixed = {
        .enable = rpm_reg_enable,
        .disable = rpm_reg_disable,
        .is_enabled = rpm_reg_is_enabled,
-       .list_voltage = regulator_list_voltage_linear_range,
 
        .get_voltage = rpm_reg_get_voltage,
        .set_voltage = rpm_reg_set_voltage,
index 80b1979e8d955f6022e7810a4b65a62f13ec1224..df036b872b050b835d0fea6f620726dc8876225b 100644
@@ -1051,6 +1051,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
                qeth_l2_set_offline(cgdev);
 
        if (card->dev) {
+               netif_napi_del(&card->napi);
                unregister_netdev(card->dev);
                card->dev = NULL;
        }
index ac544330daeb7cffaccc37306f041deffd1475bb..709b52339ff9a5907812d315e65bca964ddd11be 100644
@@ -3226,6 +3226,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
                qeth_l3_set_offline(cgdev);
 
        if (card->dev) {
+               netif_napi_del(&card->napi);
                unregister_netdev(card->dev);
                card->dev = NULL;
        }
index d6a691e27d33317c72d785340b5b371378932741..d6803a9e5ab8a49ea045bcbfafce7029ef439c5d 100644
@@ -10093,6 +10093,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
                ioa_cfg->intr_flag = IPR_USE_MSI;
        else {
                ioa_cfg->intr_flag = IPR_USE_LSI;
+               ioa_cfg->clear_isr = 1;
                ioa_cfg->nvectors = 1;
                dev_info(&pdev->dev, "Cannot enable MSI.\n");
        }
index 5649c200d37ce12f80fc49ad860a185c1a52af5f..a92a62dea7934429e48c252f77405a8dd70b6400 100644
@@ -2548,7 +2548,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
        if (!vha->flags.online)
                return;
 
-       if (rsp->msix->cpuid != smp_processor_id()) {
+       if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
                /* if kernel does not notify qla of IRQ's CPU change,
                 * then set it here.
                 */
index ff41c310c900a5760bf33423a5db4362c5dfc25b..eaccd651ccda0d239af91ebfb6dfdbd97ac340e3 100644
@@ -429,7 +429,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
         * here, and we don't know what device it is
         * trying to work with, leave it as-is.
         */
-       vmax = 8;       /* max length of vendor */
+       vmax = sizeof(devinfo->vendor);
        vskip = vendor;
        while (vmax > 0 && *vskip == ' ') {
                vmax--;
@@ -439,7 +439,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
        while (vmax > 0 && vskip[vmax - 1] == ' ')
                --vmax;
 
-       mmax = 16;      /* max length of model */
+       mmax = sizeof(devinfo->model);
        mskip = model;
        while (mmax > 0 && *mskip == ' ') {
                mmax--;
@@ -455,10 +455,12 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
                         * Behave like the older version of get_device_flags.
                         */
                        if (memcmp(devinfo->vendor, vskip, vmax) ||
-                                       devinfo->vendor[vmax])
+                                       (vmax < sizeof(devinfo->vendor) &&
+                                               devinfo->vendor[vmax]))
                                continue;
                        if (memcmp(devinfo->model, mskip, mmax) ||
-                                       devinfo->model[mmax])
+                                       (mmax < sizeof(devinfo->model) &&
+                                               devinfo->model[mmax]))
                                continue;
                        return devinfo;
                } else {
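
The scsi_devinfo hunks above replace the magic lengths 8 and 16 with
sizeof() of the fields being compared, and only probe for a terminating
byte when the trimmed key is shorter than the field: a fixed-width
vendor/model field that is exactly full has no terminator to inspect.
A hypothetical helper capturing that comparison rule:

    #include <linux/string.h>
    #include <linux/types.h>

    /* Match a whitespace-trimmed key against a fixed-width field. */
    static bool field_matches(const char *field, size_t field_sz,
                              const char *key, size_t key_len)
    {
            if (key_len > field_sz || memcmp(field, key, key_len))
                    return false;
            /* At full width there is no byte left to test for NUL. */
            return key_len == field_sz || field[key_len] == '\0';
    }
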
index cd89682065b98d7a06a7d7064ec30aff2a5e7fef..1026e180eed79019f0ec55d9ed5ba427ce71cefa 100644
@@ -578,7 +578,7 @@ static int rockchip_spi_transfer_one(
                struct spi_device *spi,
                struct spi_transfer *xfer)
 {
-       int ret = 1;
+       int ret = 0;
        struct rockchip_spi *rs = spi_master_get_devdata(master);
 
        WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
@@ -627,6 +627,8 @@ static int rockchip_spi_transfer_one(
                        spi_enable_chip(rs, 1);
                        ret = rockchip_spi_prepare_dma(rs);
                }
+               /* successful DMA prepare means the transfer is in progress */
+               ret = ret ? ret : 1;
        } else {
                spi_enable_chip(rs, 1);
                ret = rockchip_spi_pio_transfer(rs);
index 1ddd9e2309b685576a8c63abc1770f731a7acd0c..cf007f3b83ec92f1d8f976ea2b68978dbdd36f20 100644
@@ -173,13 +173,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
 {
        struct sun4i_spi *sspi = spi_master_get_devdata(master);
        unsigned int mclk_rate, div, timeout;
+       unsigned int start, end, tx_time;
        unsigned int tx_len = 0;
        int ret = 0;
        u32 reg;
 
        /* We don't support transfer larger than the FIFO */
        if (tfr->len > SUN4I_FIFO_DEPTH)
-               return -EINVAL;
+               return -EMSGSIZE;
+
+       if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
+               return -EMSGSIZE;
 
        reinit_completion(&sspi->done);
        sspi->tx_buf = tfr->tx_buf;
@@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
        sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
        sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
 
-       /* Fill the TX FIFO */
-       sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+       /*
+        * Fill the TX FIFO
+        * Filling the FIFO fully causes timeout for some reason
+        * at least on spi2 on A10s
+        */
+       sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
 
        /* Enable the interrupts */
        sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
@@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
        reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
        sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
 
+       tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+       start = jiffies;
        timeout = wait_for_completion_timeout(&sspi->done,
-                                             msecs_to_jiffies(1000));
+                                             msecs_to_jiffies(tx_time));
+       end = jiffies;
        if (!timeout) {
+               dev_warn(&master->dev,
+                        "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+                        dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+                        jiffies_to_msecs(end - start), tx_time);
                ret = -ETIMEDOUT;
                goto out;
        }
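
The fixed 1000 ms wait above becomes a budget derived from the transfer
itself: length in bytes times 8 bits, doubled as a safety margin, divided
by the clock rate in kHz to give milliseconds, floored at 100 ms (the
sun6i hunk below applies the same formula). Worked numbers for the
heuristic:

    /* tx_time = max(len * 8 * 2 / (speed_hz / 1000), 100U), in ms:
     *
     *   64 bytes @ 100 kHz: 64 * 8 * 2 / 100 =   10 -> clamped up to 100 ms
     *   64 bytes @   1 kHz: 64 * 8 * 2 / 1   = 1024 -> used as-is
     */
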
index 42e2c4bd690a253b423f42b0dfc6d1fc7c379ae9..7fce79a60608873d42af3b4b5203b4a94942ade1 100644
@@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
 {
        struct sun6i_spi *sspi = spi_master_get_devdata(master);
        unsigned int mclk_rate, div, timeout;
+       unsigned int start, end, tx_time;
        unsigned int tx_len = 0;
        int ret = 0;
        u32 reg;
@@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
        reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
        sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
 
+       tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+       start = jiffies;
        timeout = wait_for_completion_timeout(&sspi->done,
-                                             msecs_to_jiffies(1000));
+                                             msecs_to_jiffies(tx_time));
+       end = jiffies;
        if (!timeout) {
+               dev_warn(&master->dev,
+                        "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+                        dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+                        jiffies_to_msecs(end - start), tx_time);
                ret = -ETIMEDOUT;
                goto out;
        }
index 443f664534e144fd388e2baba7f4d9bb49134772..29ea8d2f982498c4290e4afc15093165656e7897 100644
@@ -646,6 +646,13 @@ free_master:
 
 static int ti_qspi_remove(struct platform_device *pdev)
 {
+       struct ti_qspi *qspi = platform_get_drvdata(pdev);
+       int rc;
+
+       rc = spi_master_suspend(qspi->master);
+       if (rc)
+               return rc;
+
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
index a8f533af9ecaeffcd39143ca1379dd8186714179..ec12181822e659a508a977caf5e8acf43b7472ed 100644
@@ -594,7 +594,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
                goto error_ret_mut;
        ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
        mutex_unlock(&st->lock);
-       if (ret)
+       if (ret < 0)
                goto error_ret;
        val = ret;
        if (base_freq > 0)
index 825da0769936ace09907977be92392dda5447407..9587fa86dc69589fc34aa0105584ef3b24add210 100644
@@ -21,7 +21,7 @@ static int ad7606_spi_read_block(struct device *dev,
 {
        struct spi_device *spi = to_spi_device(dev);
        int i, ret;
-       unsigned short *data;
+       unsigned short *data = buf;
        __be16 *bdata = buf;
 
        ret = spi_read(spi, buf, count * 2);
index 9f43976f4ef217f822916c5cead94abfeb764385..170ac980abcb84f1c4079b99187d6a18262ca1a4 100644
@@ -444,10 +444,10 @@ static ssize_t ad5933_store(struct device *dev,
                st->settling_cycles = val;
 
                /* 2x, 4x handling, see datasheet */
-               if (val > 511)
-                       val = (val >> 1) | (1 << 9);
-               else if (val > 1022)
+               if (val > 1022)
                        val = (val >> 2) | (3 << 9);
+               else if (val > 511)
+                       val = (val >> 1) | (1 << 9);
 
                dat = cpu_to_be16(val);
                ret = ad5933_i2c_write(st->client,
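
The swap above is a dead-branch fix: in the old order, any val greater
than 1022 had already matched "val > 511", so the 4x encoding could never
be reached. Range ladders like this must test the largest bound first:

    /* Settling-cycle multiplier encoding (bits 10:9), widest range first
     * so that both encodings are reachable. */
    if (val > 1022)                  /* 4x multiplier */
            val = (val >> 2) | (3 << 9);
    else if (val > 511)              /* 2x multiplier */
            val = (val >> 1) | (1 << 9);
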
index f856c4544eeaea7ac243e4a0fb2a6383524f60b0..51e0d32883ba7d3bbb5b626dcb6314a49d210dff 100644
@@ -667,8 +667,11 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
                fsi = tty->driver_data;
        else
                fsi = tty->link->driver_data;
-       devpts_kill_index(fsi, tty->index);
-       devpts_release(fsi);
+
+       if (fsi) {
+               devpts_kill_index(fsi, tty->index);
+               devpts_release(fsi);
+       }
 }
 
 static const struct tty_operations ptm_unix98_ops = {
index f973bfce5d089256086b945fb37148fffebf902d..1e93a37e27f05091c6ade07f2798a514c55ae2b8 100644
@@ -366,34 +366,22 @@ static void to_utf8(struct vc_data *vc, uint c)
 
 static void do_compute_shiftstate(void)
 {
-       unsigned int i, j, k, sym, val;
+       unsigned int k, sym, val;
 
        shift_state = 0;
        memset(shift_down, 0, sizeof(shift_down));
 
-       for (i = 0; i < ARRAY_SIZE(key_down); i++) {
-
-               if (!key_down[i])
+       for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
+               sym = U(key_maps[0][k]);
+               if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
                        continue;
 
-               k = i * BITS_PER_LONG;
-
-               for (j = 0; j < BITS_PER_LONG; j++, k++) {
-
-                       if (!test_bit(k, key_down))
-                               continue;
+               val = KVAL(sym);
+               if (val == KVAL(K_CAPSSHIFT))
+                       val = KVAL(K_SHIFT);
 
-                       sym = U(key_maps[0][k]);
-                       if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
-                               continue;
-
-                       val = KVAL(sym);
-                       if (val == KVAL(K_CAPSSHIFT))
-                               val = KVAL(K_SHIFT);
-
-                       shift_down[val]++;
-                       shift_state |= (1 << val);
-               }
+               shift_down[val]++;
+               shift_state |= BIT(val);
        }
 }
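
The do_compute_shiftstate() rewrite above collapses a word-by-word,
bit-by-bit scan of the key_down bitmap into one for_each_set_bit() walk,
which visits only the bits that are set and bounds the scan explicitly.
A minimal before/after sketch with a hypothetical handler:

    #include <linux/bitops.h>

    /* Open-coded form: outer loop over words, inner loop over bits. */
    static void scan_manual(const unsigned long *map, unsigned int nbits,
                            void (*handle)(unsigned int))
    {
            unsigned int i, j, k;

            for (i = 0; i < BITS_TO_LONGS(nbits); i++)
                    for (j = 0, k = i * BITS_PER_LONG;
                         j < BITS_PER_LONG; j++, k++)
                            if (k < nbits && test_bit(k, map))
                                    handle(k);
    }

    /* Iterator form: the body runs only for set bits. */
    static void scan_iter(const unsigned long *map, unsigned int nbits,
                          void (*handle)(unsigned int))
    {
            unsigned int k;

            for_each_set_bit(k, map, nbits)
                    handle(k);
    }
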
 
index dc125322f48f9d8c958ff09ce6e0406e77d48882..5b0fe97c46ca9fb0bff6b66e0638226bf1b5781d 100644
@@ -750,6 +750,7 @@ static void visual_init(struct vc_data *vc, int num, int init)
        vc->vc_complement_mask = 0;
        vc->vc_can_do_color = 0;
        vc->vc_panic_force_write = false;
+       vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
        vc->vc_sw->con_init(vc, init);
        if (!vc->vc_complement_mask)
                vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
index 9059b7dc185ee7dcb5e7d33d123337ff862fa6bd..2f537bbdda093d90285789cd92f77da608dbbe14 100644
@@ -21,6 +21,7 @@
  * 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/mutex.h>
@@ -450,3 +451,4 @@ int otg_statemachine(struct otg_fsm *fsm)
        return fsm->state_changed;
 }
 EXPORT_SYMBOL_GPL(otg_statemachine);
+MODULE_LICENSE("GPL");
index 34b837ae1ed79ebb909c47e3fa8c75345714b88b..d2e3f655c26f6ce16e4b63a82d7e83afbd15a524 100644
@@ -2598,26 +2598,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd);
  * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
  * deallocated.
  *
- * Make sure to only deallocate the bandwidth_mutex when the primary HCD is
- * freed.  When hcd_release() is called for either hcd in a peer set
- * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to
- * block new peering attempts
+ * Make sure to deallocate the bandwidth_mutex only when the last HCD is
+ * freed.  When hcd_release() is called for either hcd in a peer set,
+ * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
  */
 static void hcd_release(struct kref *kref)
 {
        struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
 
        mutex_lock(&usb_port_peer_mutex);
-       if (usb_hcd_is_primary_hcd(hcd)) {
-               kfree(hcd->address0_mutex);
-               kfree(hcd->bandwidth_mutex);
-       }
        if (hcd->shared_hcd) {
                struct usb_hcd *peer = hcd->shared_hcd;
 
                peer->shared_hcd = NULL;
-               if (peer->primary_hcd == hcd)
-                       peer->primary_hcd = NULL;
+               peer->primary_hcd = NULL;
+       } else {
+               kfree(hcd->address0_mutex);
+               kfree(hcd->bandwidth_mutex);
        }
        mutex_unlock(&usb_port_peer_mutex);
        kfree(hcd);
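
The hcd_release() rework above fixes a use-after-free: the old code freed
the shared address0/bandwidth mutexes whenever the primary HCD happened to
be released first, even though its peer could still be using them. The new
rule is the generic one for paired objects: detach first, and free shared
state only when no peer remains. A sketch with hypothetical names, assuming
the caller holds the lock that serializes peering (as usb_port_peer_mutex
does above):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct peer_obj {
            struct kref kref;
            struct peer_obj *peer;  /* other member of the pair, or NULL */
            struct mutex *shared;   /* allocated once, used by both */
    };

    static void peer_release(struct kref *kref)
    {
            struct peer_obj *obj = container_of(kref, struct peer_obj, kref);

            if (obj->peer) {
                    /* First of the pair to go: detach, keep shared state. */
                    obj->peer->peer = NULL;
            } else {
                    /* Last one out frees what both were using. */
                    kfree(obj->shared);
            }
            kfree(obj);
    }
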
index 50d6ae6f88bc336a8631ce76f9c45c8de077b481..89a2f712fdfe32f5fc0a6fc0681b0d8db005e2a2 100644
@@ -233,7 +233,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
        dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
                 dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
 
-       dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown");
+       dwc3_data->rstc_pwrdn =
+               devm_reset_control_get_exclusive(dev, "powerdown");
        if (IS_ERR(dwc3_data->rstc_pwrdn)) {
                dev_err(&pdev->dev, "could not get power controller\n");
                ret = PTR_ERR(dwc3_data->rstc_pwrdn);
@@ -243,7 +244,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
        /* Manage PowerDown */
        reset_control_deassert(dwc3_data->rstc_pwrdn);
 
-       dwc3_data->rstc_rst = devm_reset_control_get(dev, "softreset");
+       dwc3_data->rstc_rst =
+               devm_reset_control_get_shared(dev, "softreset");
        if (IS_ERR(dwc3_data->rstc_rst)) {
                dev_err(&pdev->dev, "could not get reset controller\n");
                ret = PTR_ERR(dwc3_data->rstc_rst);
index a94ed677d93747c143d01879a175bad23f8f795f..be4a2788fc5828ea172c5c72f5e5f5b5822f052d 100644
@@ -206,7 +206,8 @@ static int st_ehci_platform_probe(struct platform_device *dev)
                priv->clk48 = NULL;
        }
 
-       priv->pwr = devm_reset_control_get_optional(&dev->dev, "power");
+       priv->pwr =
+               devm_reset_control_get_optional_shared(&dev->dev, "power");
        if (IS_ERR(priv->pwr)) {
                err = PTR_ERR(priv->pwr);
                if (err == -EPROBE_DEFER)
@@ -214,7 +215,8 @@ static int st_ehci_platform_probe(struct platform_device *dev)
                priv->pwr = NULL;
        }
 
-       priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset");
+       priv->rst =
+               devm_reset_control_get_optional_shared(&dev->dev, "softreset");
        if (IS_ERR(priv->rst)) {
                err = PTR_ERR(priv->rst);
                if (err == -EPROBE_DEFER)
index acf2eb2a56766358c2bbdef931b070c2c81ad43d..02816a1515a1dc4dc0ec06506855bf0fc57a799c 100644
@@ -188,13 +188,15 @@ static int st_ohci_platform_probe(struct platform_device *dev)
                priv->clk48 = NULL;
        }
 
-       priv->pwr = devm_reset_control_get_optional(&dev->dev, "power");
+       priv->pwr =
+               devm_reset_control_get_optional_shared(&dev->dev, "power");
        if (IS_ERR(priv->pwr)) {
                err = PTR_ERR(priv->pwr);
                goto err_put_clks;
        }
 
-       priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset");
+       priv->rst =
+               devm_reset_control_get_optional_shared(&dev->dev, "softreset");
        if (IS_ERR(priv->rst)) {
                err = PTR_ERR(priv->rst);
                goto err_put_clks;
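
The three ST EHCI/OHCI/DWC3 hunks above adopt the split reset-control API:
_exclusive for a line the device owns outright, _shared for a line wired to
several consumers (assert/deassert calls are refcounted, so the line is
only physically asserted again once every sharer has let go), and the
_optional_* variants when the line may legitimately be absent from the
device tree. A sketch of the lookup choices in a probe path, assuming a
device with both kinds of lines:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/reset.h>

    static int demo_probe_resets(struct device *dev)
    {
            struct reset_control *pwr, *rst;

            /* This device is the only consumer of "powerdown". */
            pwr = devm_reset_control_get_exclusive(dev, "powerdown");
            if (IS_ERR(pwr))
                    return PTR_ERR(pwr);

            /* "softreset" is shared and may be missing entirely. */
            rst = devm_reset_control_get_optional_shared(dev, "softreset");
            if (IS_ERR(rst))
                    return PTR_ERR(rst);

            return reset_control_deassert(pwr);
    }
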
index 076970a54f894b80366da951f220a97203466bb7..4ce10bcca18b1f600c351675142240dbe94a4022 100644
@@ -423,36 +423,7 @@ upload:
 
        return 0;
 }
-static int __init check_prereq(void)
-{
-       struct cpuinfo_x86 *c = &cpu_data(0);
-
-       if (!xen_initial_domain())
-               return -ENODEV;
-
-       if (!acpi_gbl_FADT.smi_command)
-               return -ENODEV;
-
-       if (c->x86_vendor == X86_VENDOR_INTEL) {
-               if (!cpu_has(c, X86_FEATURE_EST))
-                       return -ENODEV;
 
-               return 0;
-       }
-       if (c->x86_vendor == X86_VENDOR_AMD) {
-               /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
-                * as we get compile warnings for the static functions.
-                */
-#define CPUID_FREQ_VOLT_CAPABILITIES    0x80000007
-#define USE_HW_PSTATE                   0x00000080
-               u32 eax, ebx, ecx, edx;
-               cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
-               if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
-                       return -ENODEV;
-               return 0;
-       }
-       return -ENODEV;
-}
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance __percpu *acpi_perf_data;
 
@@ -509,10 +480,10 @@ struct notifier_block xen_acpi_processor_resume_nb = {
 static int __init xen_acpi_processor_init(void)
 {
        unsigned int i;
-       int rc = check_prereq();
+       int rc;
 
-       if (rc)
-               return rc;
+       if (!xen_initial_domain())
+               return -ENODEV;
 
        nr_acpi_bits = get_max_acpi_id() + 1;
        acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
index cacf30d14747baa20d5f2d8a6a999ebdcba14018..7487971f9f788b12a637216c29cc853bb933c1ca 100644
@@ -316,11 +316,18 @@ static int xenbus_write_transaction(unsigned msg_type,
                        rc = -ENOMEM;
                        goto out;
                }
+       } else {
+               list_for_each_entry(trans, &u->transactions, list)
+                       if (trans->handle.id == u->u.msg.tx_id)
+                               break;
+               if (&trans->list == &u->transactions)
+                       return -ESRCH;
        }
 
        reply = xenbus_dev_request_and_reply(&u->u.msg);
        if (IS_ERR(reply)) {
-               kfree(trans);
+               if (msg_type == XS_TRANSACTION_START)
+                       kfree(trans);
                rc = PTR_ERR(reply);
                goto out;
        }
@@ -333,12 +340,7 @@ static int xenbus_write_transaction(unsigned msg_type,
                        list_add(&trans->list, &u->transactions);
                }
        } else if (u->u.msg.type == XS_TRANSACTION_END) {
-               list_for_each_entry(trans, &u->transactions, list)
-                       if (trans->handle.id == u->u.msg.tx_id)
-                               break;
-               BUG_ON(&trans->list == &u->transactions);
                list_del(&trans->list);
-
                kfree(trans);
        }
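
The added else-branch above leans on the standard "ran off the end" test
for list_for_each_entry(): when nothing matched, the cursor ends up as the
container of the list head itself, which is what the comparison
"&trans->list == &u->transactions" detects before any request is sent.
The idiom in isolation, with a hypothetical item type:

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/types.h>

    struct item {
            struct list_head list;
            u32 id;
    };

    static int check_id(struct list_head *items, u32 wanted)
    {
            struct item *it;

            list_for_each_entry(it, items, list)
                    if (it->id == wanted)
                            break;
            /* No match: 'it' is the bogus container of the head itself. */
            if (&it->list == items)
                    return -ESRCH;
            return 0;
    }
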
 
index 374b12af88127c2aef2f359ef3f13013ec548fd1..22f7cd711c5792e25eac035f0aa138c9992d9bd9 100644
@@ -232,10 +232,10 @@ static void transaction_resume(void)
 void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
 {
        void *ret;
-       struct xsd_sockmsg req_msg = *msg;
+       enum xsd_sockmsg_type type = msg->type;
        int err;
 
-       if (req_msg.type == XS_TRANSACTION_START)
+       if (type == XS_TRANSACTION_START)
                transaction_start();
 
        mutex_lock(&xs_state.request_mutex);
@@ -249,12 +249,8 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
 
        mutex_unlock(&xs_state.request_mutex);
 
-       if (IS_ERR(ret))
-               return ret;
-
        if ((msg->type == XS_TRANSACTION_END) ||
-           ((req_msg.type == XS_TRANSACTION_START) &&
-            (msg->type == XS_ERROR)))
+           ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR)))
                transaction_end();
 
        return ret;
index b84c291ba1ebf38d0295b0dd919c794c28caa099..d7b78d531e63f99f6e2a350e081c9836918b64b5 100644
@@ -74,7 +74,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
                                        v9fs_proto_dotu(v9ses));
        fid = file->private_data;
        if (!fid) {
-               fid = v9fs_fid_clone(file->f_path.dentry);
+               fid = v9fs_fid_clone(file_dentry(file));
                if (IS_ERR(fid))
                        return PTR_ERR(fid);
 
@@ -100,7 +100,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
                 * because we want write after unlink usecase
                 * to work.
                 */
-               fid = v9fs_writeback_fid(file->f_path.dentry);
+               fid = v9fs_writeback_fid(file_dentry(file));
                if (IS_ERR(fid)) {
                        err = PTR_ERR(fid);
                        mutex_unlock(&v9inode->v_mutex);
@@ -516,7 +516,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
                 * because we want write after unlink usecase
                 * to work.
                 */
-               fid = v9fs_writeback_fid(filp->f_path.dentry);
+               fid = v9fs_writeback_fid(file_dentry(filp));
                if (IS_ERR(fid)) {
                        retval = PTR_ERR(fid);
                        mutex_unlock(&v9inode->v_mutex);
index f4645c51526290e0d22a1e2589dbe21bf8e7148b..e2e7c749925ad237c57758ed18186f5bed7d497b 100644
@@ -853,7 +853,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
        struct p9_fid *fid, *inode_fid;
        struct dentry *res = NULL;
 
-       if (d_unhashed(dentry)) {
+       if (d_in_lookup(dentry)) {
                res = v9fs_vfs_lookup(dir, dentry, 0);
                if (IS_ERR(res))
                        return PTR_ERR(res);
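
This hunk, its dotl twin just below, and the matching ceph, cifs and fuse
hunks later in this merge all retarget the same test for 4.7's parallel
directory lookups: d_unhashed() can be true for a dentry whose lookup some
other opener has already completed, whereas d_in_lookup() is true only for
the in-flight lookup dentry that still needs resolving. A condensed sketch
of the ->atomic_open() shape, with hypothetical fs callbacks:

    static int my_atomic_open(struct inode *dir, struct dentry *dentry,
                              struct file *file, unsigned int flags,
                              umode_t mode)
    {
            struct dentry *res;

            /* Resolve only if this dentry is still mid-lookup. */
            if (d_in_lookup(dentry)) {
                    res = my_lookup(dir, dentry, 0);
                    if (IS_ERR(res))
                            return PTR_ERR(res);
            }

            /* ... open or create as usual ... */
            return 0;
    }
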
index a34702c998f593f60515d72fcf093cd556f5d951..1b51eaa5e2dd05445d0ba3e9c33038bcfbe4d277 100644
@@ -254,7 +254,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
        struct posix_acl *pacl = NULL, *dacl = NULL;
        struct dentry *res = NULL;
 
-       if (d_unhashed(dentry)) {
+       if (d_in_lookup(dentry)) {
                res = v9fs_vfs_lookup(dir, dentry, 0);
                if (IS_ERR(res))
                        return PTR_ERR(res);
index 6e72c98162d5351a9ad27b581eb17737eb4a1203..1780218a48f0843e2fa0777831d958cb4c4e9a37 100644
@@ -95,10 +95,8 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
        }
 
        dentry = d_obtain_alias(inode);
-       if (IS_ERR(dentry)) {
-               iput(inode);
+       if (IS_ERR(dentry))
                return dentry;
-       }
        err = ceph_init_dentry(dentry);
        if (err < 0) {
                dput(dentry);
@@ -167,10 +165,8 @@ static struct dentry *__get_parent(struct super_block *sb,
                return ERR_PTR(-ENOENT);
 
        dentry = d_obtain_alias(inode);
-       if (IS_ERR(dentry)) {
-               iput(inode);
+       if (IS_ERR(dentry))
                return dentry;
-       }
        err = ceph_init_dentry(dentry);
        if (err < 0) {
                dput(dentry);
@@ -210,7 +206,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
 
        dout("fh_to_parent %llx\n", cfh->parent_ino);
        dentry = __get_parent(sb, NULL, cfh->ino);
-       if (IS_ERR(dentry) && PTR_ERR(dentry) == -ENOENT)
+       if (unlikely(dentry == ERR_PTR(-ENOENT)))
                dentry = __fh_to_dentry(sb, cfh->parent_ino);
        return dentry;
 }
index ce2f5795e44bc73d404817a884cd5ecb0664802d..0daaf7ceedc55f3769abb441a2477ea9fe4f0f75 100644
@@ -394,7 +394,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
        if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
 
-       if (d_unhashed(dentry)) {
+       if (d_in_lookup(dentry)) {
                dn = ceph_finish_lookup(req, dentry, err);
                if (IS_ERR(dn))
                        err = PTR_ERR(dn);
index 5a53ac6b1e02515be90a4e446b103aa9f6f26874..02b071bf3732ac29808183974636e2ac9ca56ce6 100644
@@ -101,6 +101,12 @@ convert_sfm_char(const __u16 src_char, char *target)
        case SFM_SLASH:
                *target = '\\';
                break;
+       case SFM_SPACE:
+               *target = ' ';
+               break;
+       case SFM_PERIOD:
+               *target = '.';
+               break;
        default:
                return false;
        }
@@ -404,7 +410,7 @@ static __le16 convert_to_sfu_char(char src_char)
        return dest_char;
 }
 
-static __le16 convert_to_sfm_char(char src_char)
+static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
 {
        __le16 dest_char;
 
@@ -427,6 +433,18 @@ static __le16 convert_to_sfm_char(char src_char)
        case '|':
                dest_char = cpu_to_le16(SFM_PIPE);
                break;
+       case '.':
+               if (end_of_string)
+                       dest_char = cpu_to_le16(SFM_PERIOD);
+               else
+                       dest_char = 0;
+               break;
+       case ' ':
+               if (end_of_string)
+                       dest_char = cpu_to_le16(SFM_SPACE);
+               else
+                       dest_char = 0;
+               break;
        default:
                dest_char = 0;
        }
@@ -469,9 +487,16 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
                /* see if we must remap this char */
                if (map_chars == SFU_MAP_UNI_RSVD)
                        dst_char = convert_to_sfu_char(src_char);
-               else if (map_chars == SFM_MAP_UNI_RSVD)
-                       dst_char = convert_to_sfm_char(src_char);
-               else
+               else if (map_chars == SFM_MAP_UNI_RSVD) {
+                       bool end_of_string;
+
+                       if (i == srclen - 1)
+                               end_of_string = true;
+                       else
+                               end_of_string = false;
+
+                       dst_char = convert_to_sfm_char(src_char, end_of_string);
+               } else
                        dst_char = 0;
                /*
                 * FIXME: We can not handle remapping backslash (UNI_SLASH)
index bdc52cb9a676db0a9041d7e198810ce8df2aca9a..479bc0a941f35f79056dedc45ab99825669f1e6e 100644
@@ -64,6 +64,8 @@
 #define SFM_LESSTHAN    ((__u16) 0xF023)
 #define SFM_PIPE        ((__u16) 0xF027)
 #define SFM_SLASH       ((__u16) 0xF026)
+#define SFM_PERIOD     ((__u16) 0xF028)
+#define SFM_SPACE      ((__u16) 0xF029)
 
 /*
  * Mapping mechanism to use when one of the seven reserved characters is
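
SFM_PERIOD and SFM_SPACE extend the Services-for-Macintosh remap table:
'.' and ' ' are legal anywhere in a POSIX name but not at the end of a
Windows name, so the cifs_unicode.c hunk above maps them to these
private-use codepoints only in the final position, and convert_sfm_char()
maps them back on the way in. An illustrative round trip:

    /* SFM_MAP_UNI_RSVD remapping, final position only:
     *
     *   "log."  -> 'l' 'o' 'g' 0xF028   (trailing '.' -> SFM_PERIOD)
     *   "a b "  -> 'a' ' ' 'b' 0xF029   (trailing ' ' -> SFM_SPACE)
     *   "a.b"   -> 'a' '.' 'b'          (interior '.' left untouched)
     */
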
index 5d8b7edf8a8f69009114e56e28c530fb4e7e686f..5d841f39c4b70e853e5b74d1cb6e01e7d112fd25 100644
@@ -87,6 +87,7 @@ extern mempool_t *cifs_req_poolp;
 extern mempool_t *cifs_mid_poolp;
 
 struct workqueue_struct        *cifsiod_wq;
+__u32 cifs_lock_secret;
 
 /*
  * Bumps refcount for cifs super block.
@@ -1266,6 +1267,8 @@ init_cifs(void)
        spin_lock_init(&cifs_file_list_lock);
        spin_lock_init(&GlobalMid_Lock);
 
+       get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
+
        if (cifs_max_pending < 2) {
                cifs_max_pending = 2;
                cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
index bba106cdc43c4c5703568ac82016714983e1d7bc..8f1d8c1e72bece412a926d0e20b9bdc95a7fa9ee 100644
@@ -1619,6 +1619,7 @@ void cifs_oplock_break(struct work_struct *work);
 
 extern const struct slow_work_ops cifs_oplock_break_ops;
 extern struct workqueue_struct *cifsiod_wq;
+extern __u32 cifs_lock_secret;
 
 extern mempool_t *cifs_mid_poolp;
 
index 66736f57b5abdf785a724aab8701d620abbb7898..7d2b15c060909b103e96a90a330c6f0ed7ecd2d0 100644
@@ -428,7 +428,9 @@ cifs_echo_request(struct work_struct *work)
         * server->ops->need_neg() == true. Also, no need to ping if
         * we got a response recently.
         */
-       if (!server->ops->need_neg || server->ops->need_neg(server) ||
+
+       if (server->tcpStatus == CifsNeedReconnect ||
+           server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
            (server->ops->can_echo && !server->ops->can_echo(server)) ||
            time_before(jiffies, server->lstrp + echo_interval - HZ))
                goto requeue_echo;
index c3eb998a99bd18a2ed9b7b843c99be15fedab9df..fb0903fffc22c8738c34f958beb7784400a93339 100644
@@ -445,7 +445,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
                 * Check for hashed negative dentry. We have already revalidated
                 * the dentry and it is fine. No need to perform another lookup.
                 */
-               if (!d_unhashed(direntry))
+               if (!d_in_lookup(direntry))
                        return -ENOENT;
 
                res = cifs_lookup(inode, direntry, 0);
index 9793ae0bcaa2bc678f9a688c167ef2b091cc858f..d4890b6dc22db0a4c7345b195e0209c5d32cf0b0 100644
@@ -1112,6 +1112,12 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
        return rc;
 }
 
+static __u32
+hash_lockowner(fl_owner_t owner)
+{
+       return cifs_lock_secret ^ hash32_ptr((const void *)owner);
+}
+
 struct lock_to_push {
        struct list_head llist;
        __u64 offset;
@@ -1178,7 +1184,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
                else
                        type = CIFS_WRLCK;
                lck = list_entry(el, struct lock_to_push, llist);
-               lck->pid = flock->fl_pid;
+               lck->pid = hash_lockowner(flock->fl_owner);
                lck->netfid = cfile->fid.netfid;
                lck->length = length;
                lck->type = type;
@@ -1305,7 +1311,8 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;
-               rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
+               rc = CIFSSMBPosixLock(xid, tcon, netfid,
+                                     hash_lockowner(flock->fl_owner),
                                      flock->fl_start, length, flock,
                                      posix_lock_type, wait_flag);
                return rc;
@@ -1505,7 +1512,8 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
                        posix_lock_type = CIFS_UNLCK;
 
                rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
-                                     current->tgid, flock->fl_start, length,
+                                     hash_lockowner(flock->fl_owner),
+                                     flock->fl_start, length,
                                      NULL, posix_lock_type, wait_flag);
                goto out;
        }
index 848249fa120fc270d751d02597107a20319cfa29..3079b38f0afbd197bc0fc9f27df086a92ab90fbf 100644
@@ -133,6 +133,6 @@ typedef struct _AUTHENTICATE_MESSAGE {
 
 int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
 void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
-int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen,
+int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
                        struct cifs_ses *ses,
                        const struct nls_table *nls_cp);
index af0ec2d5ad0e9da25af4ec8528266451a42cc8b5..538d9b55699a10f1185b8df25a1f36f9a24ae75c 100644
@@ -364,19 +364,43 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
        sec_blob->DomainName.MaximumLength = 0;
 }
 
-/* We do not malloc the blob, it is passed in pbuffer, because its
-   maximum possible size is fixed and small, making this approach cleaner.
-   This function returns the length of the data in the blob */
-int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+static int size_of_ntlmssp_blob(struct cifs_ses *ses)
+{
+       int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
+               - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
+
+       if (ses->domainName)
+               sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+       else
+               sz += 2;
+
+       if (ses->user_name)
+               sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
+       else
+               sz += 2;
+
+       return sz;
+}
+
+int build_ntlmssp_auth_blob(unsigned char **pbuffer,
                                        u16 *buflen,
                                   struct cifs_ses *ses,
                                   const struct nls_table *nls_cp)
 {
        int rc;
-       AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
+       AUTHENTICATE_MESSAGE *sec_blob;
        __u32 flags;
        unsigned char *tmp;
 
+       rc = setup_ntlmv2_rsp(ses, nls_cp);
+       if (rc) {
+               cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+               *buflen = 0;
+               goto setup_ntlmv2_ret;
+       }
+       *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
+       sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
+
        memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
        sec_blob->MessageType = NtLmAuthenticate;
 
@@ -391,7 +415,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                        flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
        }
 
-       tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
+       tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
        sec_blob->NegotiateFlags = cpu_to_le32(flags);
 
        sec_blob->LmChallengeResponse.BufferOffset =
@@ -399,13 +423,9 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        sec_blob->LmChallengeResponse.Length = 0;
        sec_blob->LmChallengeResponse.MaximumLength = 0;
 
-       sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
+       sec_blob->NtChallengeResponse.BufferOffset =
+                               cpu_to_le32(tmp - *pbuffer);
        if (ses->user_name != NULL) {
-               rc = setup_ntlmv2_rsp(ses, nls_cp);
-               if (rc) {
-                       cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
-                       goto setup_ntlmv2_ret;
-               }
                memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
                                ses->auth_key.len - CIFS_SESS_KEY_SIZE);
                tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
@@ -423,23 +443,23 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        }
 
        if (ses->domainName == NULL) {
-               sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->DomainName.Length = 0;
                sec_blob->DomainName.MaximumLength = 0;
                tmp += 2;
        } else {
                int len;
                len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
-                                     CIFS_MAX_USERNAME_LEN, nls_cp);
+                                     CIFS_MAX_DOMAINNAME_LEN, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
-               sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->DomainName.Length = cpu_to_le16(len);
                sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
                tmp += len;
        }
 
        if (ses->user_name == NULL) {
-               sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->UserName.Length = 0;
                sec_blob->UserName.MaximumLength = 0;
                tmp += 2;
@@ -448,13 +468,13 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
                                      CIFS_MAX_USERNAME_LEN, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
-               sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->UserName.Length = cpu_to_le16(len);
                sec_blob->UserName.MaximumLength = cpu_to_le16(len);
                tmp += len;
        }
 
-       sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+       sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
        sec_blob->WorkstationName.Length = 0;
        sec_blob->WorkstationName.MaximumLength = 0;
        tmp += 2;
@@ -463,19 +483,19 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
                        && !calc_seckey(ses)) {
                memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
-               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
                sec_blob->SessionKey.MaximumLength =
                                cpu_to_le16(CIFS_CPHTXT_SIZE);
                tmp += CIFS_CPHTXT_SIZE;
        } else {
-               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->SessionKey.Length = 0;
                sec_blob->SessionKey.MaximumLength = 0;
        }
 
+       *buflen = tmp - *pbuffer;
 setup_ntlmv2_ret:
-       *buflen = tmp - pbuffer;
        return rc;
 }
 
@@ -690,6 +710,8 @@ sess_auth_lanman(struct sess_data *sess_data)
                rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
                                      ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
                                      true : false, lnm_session_key);
+               if (rc)
+                       goto out;
 
                memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
                bcc_ptr += CIFS_AUTH_RESP_SIZE;
@@ -1266,7 +1288,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
        struct cifs_ses *ses = sess_data->ses;
        __u16 bytes_remaining;
        char *bcc_ptr;
-       char *ntlmsspblob = NULL;
+       unsigned char *ntlmsspblob = NULL;
        u16 blob_len;
 
        cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
@@ -1279,19 +1301,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
        /* Build security blob before we assemble the request */
        pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
        smb_buf = (struct smb_hdr *)pSMB;
-       /*
-        * 5 is an empirical value, large enough to hold
-        * authenticate message plus max 10 of av paris,
-        * domain, user, workstation names, flags, etc.
-        */
-       ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
-                               GFP_KERNEL);
-       if (!ntlmsspblob) {
-               rc = -ENOMEM;
-               goto out;
-       }
-
-       rc = build_ntlmssp_auth_blob(ntlmsspblob,
+       rc = build_ntlmssp_auth_blob(&ntlmsspblob,
                                        &blob_len, ses, sess_data->nls_cp);
        if (rc)
                goto out_free_ntlmsspblob;
index 8f38e33d365bcca5ce9079dc64e5d10525df801c..29e06db5f187bea7d4f5ce29d2cf5c0faad6ae09 100644
@@ -588,7 +588,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
        u16 blob_length = 0;
        struct key *spnego_key = NULL;
        char *security_blob = NULL;
-       char *ntlmssp_blob = NULL;
+       unsigned char *ntlmssp_blob = NULL;
        bool use_spnego = false; /* else use raw ntlmssp */
 
        cifs_dbg(FYI, "Session Setup\n");
@@ -713,13 +713,7 @@ ssetup_ntlmssp_authenticate:
                iov[1].iov_len = blob_length;
        } else if (phase == NtLmAuthenticate) {
                req->hdr.SessionId = ses->Suid;
-               ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
-                                      GFP_KERNEL);
-               if (ntlmssp_blob == NULL) {
-                       rc = -ENOMEM;
-                       goto ssetup_exit;
-               }
-               rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
+               rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
                                             nls_cp);
                if (rc) {
                        cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
@@ -1818,6 +1812,33 @@ SMB2_echo(struct TCP_Server_Info *server)
 
        cifs_dbg(FYI, "In echo request\n");
 
+       if (server->tcpStatus == CifsNeedNegotiate) {
+               struct list_head *tmp, *tmp2;
+               struct cifs_ses *ses;
+               struct cifs_tcon *tcon;
+
+               cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
+               spin_lock(&cifs_tcp_ses_lock);
+               list_for_each(tmp, &server->smb_ses_list) {
+                       ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+                       list_for_each(tmp2, &ses->tcon_list) {
+                               tcon = list_entry(tmp2, struct cifs_tcon,
+                                                 tcon_list);
+                               /* add check for persistent handle reconnect */
+                               if (tcon && tcon->need_reconnect) {
+                                       spin_unlock(&cifs_tcp_ses_lock);
+                                       rc = smb2_reconnect(SMB2_ECHO, tcon);
+                                       spin_lock(&cifs_tcp_ses_lock);
+                               }
+                       }
+               }
+               spin_unlock(&cifs_tcp_ses_lock);
+       }
+
+       /* if no session, renegotiate failed above */
+       if (server->tcpStatus == CifsNeedNegotiate)
+               return -EIO;
+
        rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
        if (rc)
                return rc;
index 33b7ee34eda5f135fef480f0bdf55bb4037ab334..bbc1252a59f5f1431ee779e16780f19a5f47a5d8 100644
@@ -357,8 +357,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
 
        len = simple_write_to_buffer(buffer->bin_buffer,
                        buffer->bin_buffer_size, ppos, buf, count);
-       if (len > 0)
-               *ppos += len;
 out:
        mutex_unlock(&buffer->mutex);
        return len;
index 761495bf5eb91d97c6483646d89a67f10933864f..e207f8f9b7007bfa9c9a44dd7d24cc1407bac098 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -208,7 +208,12 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
                                dax.addr += first;
                                size = map_len - first;
                        }
-                       max = min(pos + size, end);
+                       /*
+                        * pos + size is one past the last offset for IO,
+                        * so pos + size can overflow loff_t at extreme offsets.
+                        * Cast to u64 to catch this and get the true minimum.
+                        */
+                       max = min_t(u64, pos + size, end);
                }
 
                if (iov_iter_rw(iter) == WRITE) {
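
The new comment and min_t() above guard the extreme-offset case: pos and
end are loff_t (signed 64-bit), so pos + size can wrap past LLONG_MAX, and
a plain signed comparison would then pick the wrapped value instead of end.
Comparing as u64 keeps the sum monotonic (the kernel builds with
-fno-strict-overflow, so the wraparound itself is well defined). Worked
values:

    /* loff_t pos = LLONG_MAX - 10, end = LLONG_MAX; size = 100;
     *
     *   min(pos + size, end)         -> the sum wraps negative and "wins"
     *   min_t(u64, pos + size, end)  -> compared as unsigned: yields end
     */
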
index 0d8eb3455b34d68cde59a48b5b46da9b67498445..e5e29f8c920b18bc6959cdb16d2ee57fec7d45a6 100644
@@ -45,7 +45,7 @@
  * ecryptfs_to_hex
  * @dst: Buffer to take hex character representation of contents of
  *       src; must be at least of size (src_size * 2)
- * @src: Buffer to be converted to a hex string respresentation
+ * @src: Buffer to be converted to a hex string representation
  * @src_size: number of bytes to convert
  */
 void ecryptfs_to_hex(char *dst, char *src, size_t src_size)
@@ -60,7 +60,7 @@ void ecryptfs_to_hex(char *dst, char *src, size_t src_size)
  * ecryptfs_from_hex
  * @dst: Buffer to take the bytes from src hex; must be at least of
  *       size (src_size / 2)
- * @src: Buffer to be converted from a hex string respresentation to raw value
+ * @src: Buffer to be converted from a hex string representation to raw value
  * @dst_size: size of dst buffer, or number of hex characters pairs to convert
  */
 void ecryptfs_from_hex(char *dst, char *src, int dst_size)
@@ -953,7 +953,7 @@ struct ecryptfs_cipher_code_str_map_elem {
 };
 
 /* Add support for additional ciphers by adding elements here. The
- * cipher_code is whatever OpenPGP applicatoins use to identify the
+ * cipher_code is whatever OpenPGP applications use to identify the
  * ciphers. List in order of probability. */
 static struct ecryptfs_cipher_code_str_map_elem
 ecryptfs_cipher_code_str_map[] = {
@@ -1410,7 +1410,7 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
  *
  * Common entry point for reading file metadata. From here, we could
  * retrieve the header information from the header region of the file,
- * the xattr region of the file, or some other repostory that is
+ * the xattr region of the file, or some other repository that is
  * stored separately from the file itself. The current implementation
  * supports retrieving the metadata information from the file contents
  * and from the xattr region.
index 7000b96b783ef04a56f056a83df595c8093533aa..ca4e83750214adc2b0ea77358937c167d02d6ba7 100644
@@ -169,9 +169,22 @@ out:
        return rc;
 }
 
+static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct file *lower_file = ecryptfs_file_to_lower(file);
+       /*
+        * Don't allow mmap on top of file systems that don't support it
+        * natively.  If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
+        * allows recursive mounting, this will need to be extended.
+        */
+       if (!lower_file->f_op->mmap)
+               return -ENODEV;
+       return generic_file_mmap(file, vma);
+}
+
 /**
  * ecryptfs_open
- * @inode: inode speciying file to open
+ * @inode: inode specifying file to open
  * @file: Structure to return filled in
  *
  * Opens the file specified by inode.
@@ -240,7 +253,7 @@ out:
 
 /**
  * ecryptfs_dir_open
- * @inode: inode speciying file to open
+ * @inode: inode specifying file to open
  * @file: Structure to return filled in
  *
  * Opens the file specified by inode.
@@ -403,7 +416,7 @@ const struct file_operations ecryptfs_main_fops = {
 #ifdef CONFIG_COMPAT
        .compat_ioctl = ecryptfs_compat_ioctl,
 #endif
-       .mmap = generic_file_mmap,
+       .mmap = ecryptfs_mmap,
        .open = ecryptfs_open,
        .flush = ecryptfs_flush,
        .release = ecryptfs_release,
index e818f5ac7a2692b6bb5c1a132d7bbfd967ad956a..866bb18efefea9953250ba1cbdc145e7d4be49af 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 #include <linux/mount.h>
-#include <linux/file.h>
 #include "ecryptfs_kernel.h"
 
 struct ecryptfs_open_req {
@@ -148,7 +147,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
        flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
        (*lower_file) = dentry_open(&req.path, flags, cred);
        if (!IS_ERR(*lower_file))
-               goto have_file;
+               goto out;
        if ((flags & O_ACCMODE) == O_RDONLY) {
                rc = PTR_ERR((*lower_file));
                goto out;
@@ -166,16 +165,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
        mutex_unlock(&ecryptfs_kthread_ctl.mux);
        wake_up(&ecryptfs_kthread_ctl.wait);
        wait_for_completion(&req.done);
-       if (IS_ERR(*lower_file)) {
+       if (IS_ERR(*lower_file))
                rc = PTR_ERR(*lower_file);
-               goto out;
-       }
-have_file:
-       if ((*lower_file)->f_op->mmap == NULL) {
-               fput(*lower_file);
-               *lower_file = NULL;
-               rc = -EMEDIUMTYPE;
-       }
 out:
        return rc;
 }
index 1698132d0e576d4fea3690f56190242de33645fc..6120044951415d7840308eea25a21bd125085109 100644 (file)
@@ -738,8 +738,7 @@ static void ecryptfs_free_kmem_caches(void)
                struct ecryptfs_cache_info *info;
 
                info = &ecryptfs_cache_infos[i];
-               if (*(info->cache))
-                       kmem_cache_destroy(*(info->cache));
+               kmem_cache_destroy(*(info->cache));
        }
 }
 
index 989a2cef6b765023b670d75af4fb3bdbf6f650e6..fe7e83a45efffeb85329b194b03208e6223817e3 100644 (file)
@@ -483,9 +483,9 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
                goto out_free;
        }
        inode->i_state |= I_WB_SWITCH;
+       __iget(inode);
        spin_unlock(&inode->i_lock);
 
-       ihold(inode);
        isw->inode = inode;
 
        atomic_inc(&isw_nr_in_flight);
index ccd4971cc6c1ac787ede8998d73f3e68f3a1294a..cca7b048c07b26e4919769fed461b46771005a19 100644 (file)
@@ -341,8 +341,10 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
        struct dentry *newent;
        bool outarg_valid = true;
 
+       fuse_lock_inode(dir);
        err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
                               &outarg, &inode);
+       fuse_unlock_inode(dir);
        if (err == -ENOENT) {
                outarg_valid = false;
                err = 0;
@@ -478,7 +480,7 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
        struct fuse_conn *fc = get_fuse_conn(dir);
        struct dentry *res = NULL;
 
-       if (d_unhashed(entry)) {
+       if (d_in_lookup(entry)) {
                res = fuse_lookup(dir, entry, 0);
                if (IS_ERR(res))
                        return PTR_ERR(res);
@@ -1341,7 +1343,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
                fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
                               FUSE_READDIR);
        }
+       fuse_lock_inode(inode);
        fuse_request_send(fc, req);
+       fuse_unlock_inode(inode);
        nbytes = req->out.args[0].size;
        err = req->out.h.error;
        fuse_put_request(fc, req);
index eddbe02c402892cc00970844021fd12512f133b4..929c383432b034f3f695e0cc83e55e9b48c35898 100644 (file)
@@ -110,6 +110,9 @@ struct fuse_inode {
 
        /** Miscellaneous bits describing inode state */
        unsigned long state;
+
+       /** Lock for serializing lookup and readdir for backward compatibility */
+       struct mutex mutex;
 };
 
 /** FUSE inode state bits */
@@ -540,6 +543,9 @@ struct fuse_conn {
        /** write-back cache policy (default is write-through) */
        unsigned writeback_cache:1;
 
+       /** allow parallel lookups and readdir (default is serialized) */
+       unsigned parallel_dirops:1;
+
        /*
         * The following bitfields are only for optimization purposes
         * and hence races in setting them will not cause malfunction
@@ -956,4 +962,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
 
 void fuse_set_initialized(struct fuse_conn *fc);
 
+void fuse_unlock_inode(struct inode *inode);
+void fuse_lock_inode(struct inode *inode);
+
 #endif /* _FS_FUSE_I_H */
index 1ce67668a8e17d2d721c456ed02865e01c01f7a2..9961d8432ce335ba445df4a36824cd12912f1419 100644 (file)
@@ -97,6 +97,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
        INIT_LIST_HEAD(&fi->queued_writes);
        INIT_LIST_HEAD(&fi->writepages);
        init_waitqueue_head(&fi->page_waitq);
+       mutex_init(&fi->mutex);
        fi->forget = fuse_alloc_forget();
        if (!fi->forget) {
                kmem_cache_free(fuse_inode_cachep, inode);
@@ -117,6 +118,7 @@ static void fuse_destroy_inode(struct inode *inode)
        struct fuse_inode *fi = get_fuse_inode(inode);
        BUG_ON(!list_empty(&fi->write_files));
        BUG_ON(!list_empty(&fi->queued_writes));
+       mutex_destroy(&fi->mutex);
        kfree(fi->forget);
        call_rcu(&inode->i_rcu, fuse_i_callback);
 }
@@ -351,6 +353,18 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
        return 0;
 }
 
+void fuse_lock_inode(struct inode *inode)
+{
+       if (!get_fuse_conn(inode)->parallel_dirops)
+               mutex_lock(&get_fuse_inode(inode)->mutex);
+}
+
+void fuse_unlock_inode(struct inode *inode)
+{
+       if (!get_fuse_conn(inode)->parallel_dirops)
+               mutex_unlock(&get_fuse_inode(inode)->mutex);
+}
+
 static void fuse_umount_begin(struct super_block *sb)
 {
        fuse_abort_conn(get_fuse_conn_super(sb));
@@ -898,6 +912,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                                fc->async_dio = 1;
                        if (arg->flags & FUSE_WRITEBACK_CACHE)
                                fc->writeback_cache = 1;
+                       if (arg->flags & FUSE_PARALLEL_DIROPS)
+                               fc->parallel_dirops = 1;
                        if (arg->time_gran && arg->time_gran <= 1000000000)
                                fc->sb->s_time_gran = arg->time_gran;
                } else {
@@ -928,7 +944,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
                FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
                FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
                FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
-               FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
+               FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
+               FUSE_PARALLEL_DIROPS;
        req->in.h.opcode = FUSE_INIT;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
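Taken together, the three fuse hunks above make parallelism opt-in: lookup and readdir stay serialized through the new per-inode mutex unless the server sets FUSE_PARALLEL_DIROPS in its INIT reply. A compilable userspace analogue of the conditional-locking pattern (hypothetical types, pthreads standing in for kernel mutexes):

	#include <pthread.h>
	#include <stdbool.h>

	struct conn { bool parallel_dirops; };	/* fixed once at INIT time */
	struct node { struct conn *conn; pthread_mutex_t mu; };

	static void node_lock(struct node *n)
	{
		if (!n->conn->parallel_dirops)	/* legacy server: serialize */
			pthread_mutex_lock(&n->mu);
	}

	static void node_unlock(struct node *n)
	{
		if (!n->conn->parallel_dirops)
			pthread_mutex_unlock(&n->mu);
	}

Both sides test the same flag; that is only safe because parallel_dirops is written once, while processing the INIT reply, before any lookup or readdir can run.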
index 21dc784f66c2268d2e857314104847b600cc09de..9bad79fede37fb7e27023df1c1908e8a1783fb88 100644 (file)
@@ -1189,7 +1189,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
        struct dentry *d;
        bool excl = !!(flags & O_EXCL);
 
-       if (!d_unhashed(dentry))
+       if (!d_in_lookup(dentry))
                goto skip_lookup;
 
        d = __gfs2_lookup(dir, dentry, file, opened);
index cedeacbae303a9483fea836ba4a5cc24cb022f41..74dc8b9e7f53ab394113783c96fb754c045735fa 100644 (file)
@@ -84,6 +84,61 @@ int dcache_dir_close(struct inode *inode, struct file *file)
 }
 EXPORT_SYMBOL(dcache_dir_close);
 
+/* parent is locked at least shared */
+static struct dentry *next_positive(struct dentry *parent,
+                                   struct list_head *from,
+                                   int count)
+{
+       unsigned *seq = &parent->d_inode->i_dir_seq, n;
+       struct dentry *res;
+       struct list_head *p;
+       bool skipped;
+       int i;
+
+retry:
+       i = count;
+       skipped = false;
+       n = smp_load_acquire(seq) & ~1;
+       res = NULL;
+       rcu_read_lock();
+       for (p = from->next; p != &parent->d_subdirs; p = p->next) {
+               struct dentry *d = list_entry(p, struct dentry, d_child);
+               if (!simple_positive(d)) {
+                       skipped = true;
+               } else if (!--i) {
+                       res = d;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+       if (skipped) {
+               smp_rmb();
+               if (unlikely(*seq != n))
+                       goto retry;
+       }
+       return res;
+}
+
+static void move_cursor(struct dentry *cursor, struct list_head *after)
+{
+       struct dentry *parent = cursor->d_parent;
+       unsigned n, *seq = &parent->d_inode->i_dir_seq;
+       spin_lock(&parent->d_lock);
+       for (;;) {
+               n = *seq;
+               if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
+                       break;
+               cpu_relax();
+       }
+       __list_del(cursor->d_child.prev, cursor->d_child.next);
+       if (after)
+               list_add(&cursor->d_child, after);
+       else
+               list_add_tail(&cursor->d_child, &parent->d_subdirs);
+       smp_store_release(seq, n + 2);
+       spin_unlock(&parent->d_lock);
+}
+
 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
 {
        struct dentry *dentry = file->f_path.dentry;
@@ -99,25 +154,14 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
        if (offset != file->f_pos) {
                file->f_pos = offset;
                if (file->f_pos >= 2) {
-                       struct list_head *p;
                        struct dentry *cursor = file->private_data;
+                       struct dentry *to;
                        loff_t n = file->f_pos - 2;
 
-                       spin_lock(&dentry->d_lock);
-                       /* d_lock not required for cursor */
-                       list_del(&cursor->d_child);
-                       p = dentry->d_subdirs.next;
-                       while (n && p != &dentry->d_subdirs) {
-                               struct dentry *next;
-                               next = list_entry(p, struct dentry, d_child);
-                               spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
-                               if (simple_positive(next))
-                                       n--;
-                               spin_unlock(&next->d_lock);
-                               p = p->next;
-                       }
-                       list_add_tail(&cursor->d_child, p);
-                       spin_unlock(&dentry->d_lock);
+                       inode_lock_shared(dentry->d_inode);
+                       to = next_positive(dentry, &dentry->d_subdirs, n);
+                       move_cursor(cursor, to ? &to->d_child : NULL);
+                       inode_unlock_shared(dentry->d_inode);
                }
        }
        return offset;
@@ -140,36 +184,25 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
 {
        struct dentry *dentry = file->f_path.dentry;
        struct dentry *cursor = file->private_data;
-       struct list_head *p, *q = &cursor->d_child;
+       struct list_head *p = &cursor->d_child;
+       struct dentry *next;
+       bool moved = false;
 
        if (!dir_emit_dots(file, ctx))
                return 0;
-       spin_lock(&dentry->d_lock);
-       if (ctx->pos == 2)
-               list_move(q, &dentry->d_subdirs);
 
-       for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
-               struct dentry *next = list_entry(p, struct dentry, d_child);
-               spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
-               if (!simple_positive(next)) {
-                       spin_unlock(&next->d_lock);
-                       continue;
-               }
-
-               spin_unlock(&next->d_lock);
-               spin_unlock(&dentry->d_lock);
+       if (ctx->pos == 2)
+               p = &dentry->d_subdirs;
+       while ((next = next_positive(dentry, p, 1)) != NULL) {
                if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
                              d_inode(next)->i_ino, dt_type(d_inode(next))))
-                       return 0;
-               spin_lock(&dentry->d_lock);
-               spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
-               /* next is still alive */
-               list_move(q, p);
-               spin_unlock(&next->d_lock);
-               p = q;
+                       break;
+               moved = true;
+               p = &next->d_child;
                ctx->pos++;
        }
-       spin_unlock(&dentry->d_lock);
+       if (moved)
+               move_cursor(cursor, p);
        return 0;
 }
 EXPORT_SYMBOL(dcache_readdir);
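next_positive() and move_cursor() above replace per-entry d_lock juggling with a sequence-count protocol on i_dir_seq: writers hold the counter odd while relinking a cursor, and readers sample an even value, walk without locks, and retry only if they skipped a negative entry and the counter moved underneath them. A compilable C11 analogue of that protocol (userspace atomics standing in for cmpxchg/smp_load_acquire/smp_store_release):

	#include <stdatomic.h>

	static _Atomic unsigned dir_seq;

	/* writer: counter goes odd while the list is rewritten,
	 * then even again, advanced by 2 */
	static unsigned write_begin(void)
	{
		unsigned n;
		for (;;) {
			n = atomic_load_explicit(&dir_seq, memory_order_relaxed);
			if (!(n & 1) &&
			    atomic_compare_exchange_weak_explicit(&dir_seq,
					&n, n + 1,
					memory_order_acquire,
					memory_order_relaxed))
				return n;
			/* spin: another writer holds the count odd */
		}
	}

	static void write_end(unsigned n)
	{
		atomic_store_explicit(&dir_seq, n + 2, memory_order_release);
	}

	/* reader: sample an even value, walk, retry if the count moved */
	static unsigned read_begin(void)
	{
		return atomic_load_explicit(&dir_seq,
					    memory_order_acquire) & ~1u;
	}

	static int read_retry(unsigned n)
	{
		atomic_thread_fence(memory_order_acquire); /* like smp_rmb() */
		return atomic_load_explicit(&dir_seq,
					    memory_order_relaxed) != n;
	}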
index 154a107cd376238349ef6a889bb85ab0d23a0e31..fc4084ef4736d47a410e27052497cd00829204c2 100644 (file)
@@ -335,12 +335,17 @@ static struct notifier_block lockd_inet6addr_notifier = {
 };
 #endif
 
-static void lockd_svc_exit_thread(void)
+static void lockd_unregister_notifiers(void)
 {
        unregister_inetaddr_notifier(&lockd_inetaddr_notifier);
 #if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
 #endif
+}
+
+static void lockd_svc_exit_thread(void)
+{
+       lockd_unregister_notifiers();
        svc_exit_thread(nlmsvc_rqst);
 }
 
@@ -462,7 +467,7 @@ int lockd_up(struct net *net)
         * Note: svc_serv structures have an initial use count of 1,
         * so we exit through here on both success and failure.
         */
-err_net:
+err_put:
        svc_destroy(serv);
 err_create:
        mutex_unlock(&nlmsvc_mutex);
@@ -470,7 +475,9 @@ err_create:
 
 err_start:
        lockd_down_net(serv, net);
-       goto err_net;
+err_net:
+       lockd_unregister_notifiers();
+       goto err_put;
 }
 EXPORT_SYMBOL_GPL(lockd_up);
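Factoring out lockd_unregister_notifiers() lets the new err_net label undo the notifier registration before falling through to err_put, so each failure point unwinds exactly what was set up before it. A generic compilable sketch of that layered goto-unwind idiom (all names here hypothetical):

	static int setup_a(void) { return 0; }
	static void undo_a(void) { }
	static int setup_b(void) { return 0; }
	static void undo_b(void) { }
	static int setup_c(void) { return 0; }

	static int bring_up(void)
	{
		int rc;

		rc = setup_a();
		if (rc)
			goto out;	/* nothing to undo yet */
		rc = setup_b();
		if (rc)
			goto err_a;	/* undo step a only */
		rc = setup_c();
		if (rc)
			goto err_b;	/* undo b, then fall through to a */
		return 0;

	err_b:
		undo_b();
	err_a:
		undo_a();
	out:
		return rc;
	}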
 
index 7c5f91be9b65c4ddabe441d00bbda616ca0c2f26..ee1b15f6fc135c33e2e0564eb5e10db927080533 100644 (file)
@@ -1628,7 +1628,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
 {
        struct file_lock *fl, *my_fl = NULL, *lease;
        struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct file_lock_context *ctx;
        bool is_deleg = (*flp)->fl_flags & FL_DELEG;
        int error;
index 783004af57077a9947a8f3a3228cbc8465a79b3e..419f746d851d1c1745fc0edca4a37459a4b17f35 100644 (file)
@@ -1562,6 +1562,7 @@ void __detach_mounts(struct dentry *dentry)
                goto out_unlock;
 
        lock_mount_hash();
+       event++;
        while (!hlist_empty(&mp->m_list)) {
                mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
                if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
index aaf7bd0cbae20216333042c5a082a7560f6af61f..19d93d0cd400f5ac175a9d257ec8a8ced0e04c21 100644 (file)
@@ -424,12 +424,17 @@ static int xdr_decode(nfs_readdir_descriptor_t *desc,
 static
 int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
 {
+       struct inode *inode;
        struct nfs_inode *nfsi;
 
        if (d_really_is_negative(dentry))
                return 0;
 
-       nfsi = NFS_I(d_inode(dentry));
+       inode = d_inode(dentry);
+       if (is_bad_inode(inode) || NFS_STALE(inode))
+               return 0;
+
+       nfsi = NFS_I(inode);
        if (entry->fattr->fileid == nfsi->fileid)
                return 1;
        if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0)
@@ -1363,7 +1368,6 @@ EXPORT_SYMBOL_GPL(nfs_dentry_operations);
 struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
 {
        struct dentry *res;
-       struct dentry *parent;
        struct inode *inode = NULL;
        struct nfs_fh *fhandle = NULL;
        struct nfs_fattr *fattr = NULL;
@@ -1393,7 +1397,6 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
        if (IS_ERR(label))
                goto out;
 
-       parent = dentry->d_parent;
        /* Protect against concurrent sillydeletes */
        trace_nfs_lookup_enter(dir, dentry, flags);
        error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
@@ -1482,11 +1485,13 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
                    struct file *file, unsigned open_flags,
                    umode_t mode, int *opened)
 {
+       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        struct nfs_open_context *ctx;
        struct dentry *res;
        struct iattr attr = { .ia_valid = ATTR_OPEN };
        struct inode *inode;
        unsigned int lookup_flags = 0;
+       bool switched = false;
        int err;
 
        /* Expect a negative dentry */
@@ -1501,7 +1506,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
 
        /* NFS only supports OPEN on regular files */
        if ((open_flags & O_DIRECTORY)) {
-               if (!d_unhashed(dentry)) {
+               if (!d_in_lookup(dentry)) {
                        /*
                         * Hashed negative dentry with O_DIRECTORY: dentry was
                         * revalidated and is fine, no need to perform lookup
@@ -1525,6 +1530,17 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
                attr.ia_size = 0;
        }
 
+       if (!(open_flags & O_CREAT) && !d_in_lookup(dentry)) {
+               d_drop(dentry);
+               switched = true;
+               dentry = d_alloc_parallel(dentry->d_parent,
+                                         &dentry->d_name, &wq);
+               if (IS_ERR(dentry))
+                       return PTR_ERR(dentry);
+               if (unlikely(!d_in_lookup(dentry)))
+                       return finish_no_open(file, dentry);
+       }
+
        ctx = create_nfs_open_context(dentry, open_flags);
        err = PTR_ERR(ctx);
        if (IS_ERR(ctx))
@@ -1536,9 +1552,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
                err = PTR_ERR(inode);
                trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
                put_nfs_open_context(ctx);
+               d_drop(dentry);
                switch (err) {
                case -ENOENT:
-                       d_drop(dentry);
                        d_add(dentry, NULL);
                        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
                        break;
@@ -1560,14 +1576,23 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
        trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
        put_nfs_open_context(ctx);
 out:
+       if (unlikely(switched)) {
+               d_lookup_done(dentry);
+               dput(dentry);
+       }
        return err;
 
 no_open:
        res = nfs_lookup(dir, dentry, lookup_flags);
-       err = PTR_ERR(res);
+       if (switched) {
+               d_lookup_done(dentry);
+               if (!res)
+                       res = dentry;
+               else
+                       dput(dentry);
+       }
        if (IS_ERR(res))
-               goto out;
-
+               return PTR_ERR(res);
        return finish_no_open(file, res);
 }
 EXPORT_SYMBOL_GPL(nfs_atomic_open);
index 979b3c4dee6aedbb354faefd1aca8cf1b3be8789..c7326c2af2c3d33df7a96497c427664d90f7861f 100644 (file)
@@ -353,10 +353,12 @@ static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
 
        result = wait_for_completion_killable(&dreq->completion);
 
+       if (!result) {
+               result = dreq->count;
+               WARN_ON_ONCE(dreq->count < 0);
+       }
        if (!result)
                result = dreq->error;
-       if (!result)
-               result = dreq->count;
 
 out:
        return (ssize_t) result;
@@ -386,8 +388,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
 
        if (dreq->iocb) {
                long res = (long) dreq->error;
-               if (!res)
+               if (dreq->count != 0) {
                        res = (long) dreq->count;
+                       WARN_ON_ONCE(dreq->count < 0);
+               }
                dreq->iocb->ki_complete(dreq->iocb, res, 0);
        }
 
index 52e7d6869e3b2445dc6417875b3c753edb681ecc..dda689d7a8a706862e21737cb5189ab175d976e1 100644 (file)
@@ -282,6 +282,7 @@ nfs_init_locked(struct inode *inode, void *opaque)
        struct nfs_fattr        *fattr = desc->fattr;
 
        set_nfs_fileid(inode, fattr->fileid);
+       inode->i_mode = fattr->mode;
        nfs_copy_fh(NFS_FH(inode), desc->fh);
        return 0;
 }
index de97567795a52c523abfa5936829303a388560e8..ff416d0e24bc25215a991c282e8ae19a65d31832 100644 (file)
@@ -2882,12 +2882,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
                        call_close |= is_wronly;
                else if (is_wronly)
                        calldata->arg.fmode |= FMODE_WRITE;
+               if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
+                       call_close |= is_rdwr;
        } else if (is_rdwr)
                calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
 
-       if (calldata->arg.fmode == 0)
-               call_close |= is_rdwr;
-
        if (!nfs4_valid_open_stateid(state))
                call_close = 0;
        spin_unlock(&state->owner->so_lock);
@@ -7924,8 +7923,8 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
                        break;
                }
                lo = NFS_I(inode)->layout;
-               if (lo && nfs4_stateid_match(&lgp->args.stateid,
-                                       &lo->plh_stateid)) {
+               if (lo && !test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) &&
+                   nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
                        LIST_HEAD(head);
 
                        /*
@@ -7936,10 +7935,10 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
                        pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
                        spin_unlock(&inode->i_lock);
                        pnfs_free_lseg_list(&head);
+                       status = -EAGAIN;
+                       goto out;
                } else
                        spin_unlock(&inode->i_lock);
-               status = -EAGAIN;
-               goto out;
        }
 
        status = nfs4_handle_exception(server, status, exception);
@@ -8036,7 +8035,10 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
                .flags = RPC_TASK_ASYNC,
        };
        struct pnfs_layout_segment *lseg = NULL;
-       struct nfs4_exception exception = { .timeout = *timeout };
+       struct nfs4_exception exception = {
+               .inode = inode,
+               .timeout = *timeout,
+       };
        int status = 0;
 
        dprintk("--> %s\n", __func__);
index 9679f47493640d4ce4a8a5e571727ece40e13625..834b875900d62addf6db7f6eb590ce5c2d1b09bc 100644 (file)
@@ -1488,9 +1488,9 @@ restart:
                                        }
                                        spin_unlock(&state->state_lock);
                                }
-                               nfs4_put_open_state(state);
                                clear_bit(NFS_STATE_RECLAIM_NOGRACE,
                                        &state->flags);
+                               nfs4_put_open_state(state);
                                spin_lock(&sp->so_lock);
                                goto restart;
                        }
index 0c7e0d45a4de6ee1fba11c40417d3fb01678049b..0fbe734cc38cb8d27ba2c8efaf985a676f4d0cd8 100644 (file)
@@ -361,8 +361,10 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
        list_del_init(&lseg->pls_list);
        /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
        atomic_dec(&lo->plh_refcount);
-       if (list_empty(&lo->plh_segs))
+       if (list_empty(&lo->plh_segs)) {
+               set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
                clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
+       }
        rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
 }
 
@@ -1290,6 +1292,7 @@ alloc_init_layout_hdr(struct inode *ino,
        INIT_LIST_HEAD(&lo->plh_bulk_destroy);
        lo->plh_inode = ino;
        lo->plh_lc_cred = get_rpccred(ctx->cred);
+       lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
        return lo;
 }
 
@@ -1297,6 +1300,8 @@ static struct pnfs_layout_hdr *
 pnfs_find_alloc_layout(struct inode *ino,
                       struct nfs_open_context *ctx,
                       gfp_t gfp_flags)
+       __releases(&ino->i_lock)
+       __acquires(&ino->i_lock)
 {
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *new = NULL;
@@ -1565,8 +1570,7 @@ lookup_again:
         * stateid, or it has been invalidated, then we must use the open
         * stateid.
         */
-       if (lo->plh_stateid.seqid == 0 ||
-           test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
+       if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
 
                /*
                 * The first layoutget for the file. Need to serialize per
index 0dfc476da3e10918bcf0242d779cda343e6dac61..b38e3c0dc7908566acd1c8bc9d51402e8069ef6a 100644 (file)
@@ -247,7 +247,11 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages,
 }
 
 /* Helper function for pnfs_generic_commit_pagelist to catch an empty
- * page list. This can happen when two commits race. */
+ * page list. This can happen when two commits race.
+ *
+ * This must be called instead of nfs_init_commit - call one or the other, but
+ * not both!
+ */
 static bool
 pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
                                          struct nfs_commit_data *data,
@@ -256,7 +260,11 @@ pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
        if (list_empty(pages)) {
                if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
                        wake_up_atomic_t(&cinfo->mds->rpcs_out);
-               nfs_commitdata_release(data);
+               /* don't call nfs_commitdata_release - it tries to put
+                * the open_context, which is not acquired until
+                * nfs_init_commit, and that has not been called on @data */
+               WARN_ON_ONCE(data->context);
+               nfs_commit_free(data);
                return true;
        }
 
index 6776d7a7839e0e8afcd966d296678fbc0d69ca7d..572e5b3b06f1566f40e7df7be33b8f14aafd2e16 100644 (file)
@@ -367,13 +367,13 @@ readpage_async_filler(void *data, struct page *page)
                nfs_list_remove_request(new);
                nfs_readpage_release(new);
                error = desc->pgio->pg_error;
-               goto out_unlock;
+               goto out;
        }
        return 0;
 out_error:
        error = PTR_ERR(new);
-out_unlock:
        unlock_page(page);
+out:
        return error;
 }
 
index c2a6b08940228c838375ed9ef37c548c5052a2cc..5c9d2d80ff70bf851e835c97adf775bbac963641 100644 (file)
@@ -505,6 +505,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
        struct dentry *upper;
        struct dentry *opaquedir = NULL;
        int err;
+       int flags = 0;
 
        if (WARN_ON(!workdir))
                return -EROFS;
@@ -534,46 +535,39 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
        if (err)
                goto out_dput;
 
-       whiteout = ovl_whiteout(workdir, dentry);
-       err = PTR_ERR(whiteout);
-       if (IS_ERR(whiteout))
+       upper = lookup_one_len(dentry->d_name.name, upperdir,
+                              dentry->d_name.len);
+       err = PTR_ERR(upper);
+       if (IS_ERR(upper))
                goto out_unlock;
 
-       upper = ovl_dentry_upper(dentry);
-       if (!upper) {
-               upper = lookup_one_len(dentry->d_name.name, upperdir,
-                                      dentry->d_name.len);
-               err = PTR_ERR(upper);
-               if (IS_ERR(upper))
-                       goto kill_whiteout;
-
-               err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
-               dput(upper);
-               if (err)
-                       goto kill_whiteout;
-       } else {
-               int flags = 0;
+       err = -ESTALE;
+       if ((opaquedir && upper != opaquedir) ||
+           (!opaquedir && ovl_dentry_upper(dentry) &&
+            upper != ovl_dentry_upper(dentry))) {
+               goto out_dput_upper;
+       }
 
-               if (opaquedir)
-                       upper = opaquedir;
-               err = -ESTALE;
-               if (upper->d_parent != upperdir)
-                       goto kill_whiteout;
+       whiteout = ovl_whiteout(workdir, dentry);
+       err = PTR_ERR(whiteout);
+       if (IS_ERR(whiteout))
+               goto out_dput_upper;
 
-               if (is_dir)
-                       flags |= RENAME_EXCHANGE;
+       if (d_is_dir(upper))
+               flags = RENAME_EXCHANGE;
 
-               err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
-               if (err)
-                       goto kill_whiteout;
+       err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
+       if (err)
+               goto kill_whiteout;
+       if (flags)
+               ovl_cleanup(wdir, upper);
 
-               if (is_dir)
-                       ovl_cleanup(wdir, upper);
-       }
        ovl_dentry_version_inc(dentry->d_parent);
 out_d_drop:
        d_drop(dentry);
        dput(whiteout);
+out_dput_upper:
+       dput(upper);
 out_unlock:
        unlock_rename(workdir, upperdir);
 out_dput:
index 1dbeab6cf96e54c7e43cf040ef3d909ce6fe2071..d1cdc60dd68fa25aa74e2474a09e07aab08b7e26 100644 (file)
@@ -59,16 +59,40 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
        if (err)
                goto out;
 
+       if (attr->ia_valid & ATTR_SIZE) {
+               struct inode *realinode = d_inode(ovl_dentry_real(dentry));
+
+               err = -ETXTBSY;
+               if (atomic_read(&realinode->i_writecount) < 0)
+                       goto out_drop_write;
+       }
+
        err = ovl_copy_up(dentry);
        if (!err) {
+               struct inode *winode = NULL;
+
                upperdentry = ovl_dentry_upper(dentry);
 
+               if (attr->ia_valid & ATTR_SIZE) {
+                       winode = d_inode(upperdentry);
+                       err = get_write_access(winode);
+                       if (err)
+                               goto out_drop_write;
+               }
+
+               if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
+                       attr->ia_valid &= ~ATTR_MODE;
+
                inode_lock(upperdentry->d_inode);
                err = notify_change(upperdentry, attr, NULL);
                if (!err)
                        ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
                inode_unlock(upperdentry->d_inode);
+
+               if (winode)
+                       put_write_access(winode);
        }
+out_drop_write:
        ovl_drop_write(dentry);
 out:
        return err;
@@ -121,16 +145,18 @@ int ovl_permission(struct inode *inode, int mask)
 
                err = vfs_getattr(&realpath, &stat);
                if (err)
-                       return err;
+                       goto out_dput;
 
+               err = -ESTALE;
                if ((stat.mode ^ inode->i_mode) & S_IFMT)
-                       return -ESTALE;
+                       goto out_dput;
 
                inode->i_mode = stat.mode;
                inode->i_uid = stat.uid;
                inode->i_gid = stat.gid;
 
-               return generic_permission(inode, mask);
+               err = generic_permission(inode, mask);
+               goto out_dput;
        }
 
        /* Careful in RCU walk mode */
@@ -387,12 +413,11 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
        if (!inode)
                return NULL;
 
-       mode &= S_IFMT;
-
        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_flags |= S_NOATIME | S_NOCMTIME;
 
+       mode &= S_IFMT;
        switch (mode) {
        case S_IFDIR:
                inode->i_private = oe;
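The ATTR_SIZE checks above refuse to truncate a file whose real inode is currently mapped for execution (i_writecount < 0), and hold get_write_access() on the upper inode across the size change so an exec cannot slip in between the check and notify_change(). The same rule is visible from userspace, where writing to a running binary fails with ETXTBSY:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* /proc/self/exe is the binary we are executing right now;
		 * Linux denies write access to it while it runs */
		int fd = open("/proc/self/exe", O_WRONLY);

		if (fd < 0)	/* expected: ETXTBSY, "Text file busy" */
			printf("open for write: %s\n", strerror(errno));
		return 0;
	}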
index 4bd9b5ba8f42008b7f72cf271fac89c67b7eca6d..cfbca53590d078b89ca613e65c3b10486763f656 100644 (file)
@@ -187,6 +187,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
 {
        to->i_uid = from->i_uid;
        to->i_gid = from->i_gid;
+       to->i_mode = from->i_mode;
 }
 
 /* dir.c */
index ce02f46029da7c3aa7c69be9291d62c81baeb642..9a7693d5f8fffb38bd4a07804e4627ed7f50361a 100644 (file)
@@ -1082,11 +1082,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                        if (err < 0)
                                goto out_put_workdir;
 
-                       if (!err) {
-                               pr_err("overlayfs: upper fs needs to support d_type.\n");
-                               err = -EINVAL;
-                               goto out_put_workdir;
-                       }
+                       /*
+                        * We allowed this configuration and don't want to
+                        * break users over kernel upgrade. So warn instead
+                        * of erroring out.
+                        */
+                       if (!err)
+                               pr_warn("overlayfs: upper fs needs to support d_type.\n");
                }
        }
 
index dbca7375deefa3f7d2499f516697ffdd28178546..63a6ff2cfc6821961265136e1f62cf455da3b195 100644 (file)
@@ -1575,6 +1575,12 @@ xfs_ioc_swapext(
                goto out_put_tmp_file;
        }
 
+       if (f.file->f_op != &xfs_file_operations ||
+           tmp.file->f_op != &xfs_file_operations) {
+               error = -EINVAL;
+               goto out_put_tmp_file;
+       }
+
        ip = XFS_I(file_inode(f.file));
        tip = XFS_I(file_inode(tmp.file));
 
index 797ae2ec8eee2d129653514cea7b891db96e38d6..29c691265b49357bc0d036b71897348806c58e6c 100644 (file)
@@ -78,6 +78,7 @@
 
 /* ACPI PCI Interrupt Link (pci_link.c) */
 
+int acpi_irq_penalty_init(void);
 int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
                               int *polarity, char **name);
 int acpi_pci_link_free_irq(acpi_handle handle);
index 4e4c21491c4188a4401887feb55978fe58b1ff47..1ff3a76c265dbcbaea2f427359bb16c75e719200 100644 (file)
@@ -192,7 +192,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
 /*
  * Optionally support group module level code.
  */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE);
 
 /*
  * Optionally use 32-bit FADT addresses if and when there is a conflict
index 6a67ab94b553363934bc9c2e07eb12d6c8a977f7..081d0f258d4c98d3cb36615c2dd252b630052b99 100644 (file)
 
 #define INIT_TEXT                                                      \
        *(.init.text)                                                   \
+       *(.text.startup)                                                \
        MEM_DISCARD(init.text)
 
 #define EXIT_DATA                                                      \
        *(.exit.data)                                                   \
+       *(.fini_array)                                                  \
+       *(.dtors)                                                       \
        MEM_DISCARD(exit.data)                                          \
        MEM_DISCARD(exit.rodata)
 
 #define EXIT_TEXT                                                      \
        *(.exit.text)                                                   \
+       *(.text.exit)                                                   \
        MEM_DISCARD(exit.text)
 
 #define EXIT_CALL                                                      \
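The added sections are where GCC emits compiler-generated startup and teardown artifacts: at -O2 run-once code tends to land in .text.startup, and destructor records go to .fini_array (.dtors on older toolchains), the names the linker-script additions above sweep into INIT_TEXT and EXIT_DATA. A small host-side illustration, assuming a GCC toolchain and a hypothetical demo.c:

	#include <stdio.h>

	__attribute__((constructor)) static void demo_ctor(void) { puts("ctor"); }
	__attribute__((destructor))  static void demo_dtor(void) { puts("dtor"); }

	int main(void) { return 0; }

	/* $ gcc -O2 -c demo.c && objdump -h demo.o
	 *   expect sections such as .text.startup, .init_array and
	 *   .fini_array among the output */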
index c2fe2cffb809b6cb6208af9dde84c5d14aa5c555..d3778652e4629ad4765de4aa892a0ff950647b34 100644 (file)
@@ -52,7 +52,6 @@
 #include <linux/poll.h>
 #include <linux/ratelimit.h>
 #include <linux/sched.h>
-#include <linux/seqlock.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
@@ -87,6 +86,7 @@ struct drm_device_dma;
 struct drm_dma_handle;
 struct drm_gem_object;
 struct drm_master;
+struct drm_vblank_crtc;
 
 struct device_node;
 struct videomode;
@@ -684,35 +684,6 @@ struct drm_minor {
        struct mutex debugfs_lock; /* Protects debugfs_list. */
 };
 
-
-struct drm_pending_vblank_event {
-       struct drm_pending_event base;
-       unsigned int pipe;
-       struct drm_event_vblank event;
-};
-
-struct drm_vblank_crtc {
-       struct drm_device *dev;         /* pointer to the drm_device */
-       wait_queue_head_t queue;        /**< VBLANK wait queue */
-       struct timer_list disable_timer;                /* delayed disable timer */
-
-       seqlock_t seqlock;              /* protects vblank count and time */
-
-       u32 count;                      /* vblank counter */
-       struct timeval time;            /* vblank timestamp */
-
-       atomic_t refcount;              /* number of users of vblank interruptsper crtc */
-       u32 last;                       /* protected by dev->vbl_lock, used */
-                                       /* for wraparound handling */
-       u32 last_wait;                  /* Last vblank seqno waited per CRTC */
-       unsigned int inmodeset;         /* Display driver is setting mode */
-       unsigned int pipe;              /* crtc index */
-       int framedur_ns;                /* frame/field duration in ns */
-       int linedur_ns;                 /* line duration in ns */
-       bool enabled;                   /* so we don't call enable more than
-                                          once per disable */
-};
-
 /**
  * DRM device structure. This structure represent a complete card that
  * may contain multiple heads.
@@ -847,6 +818,8 @@ struct drm_device {
        int switch_power_state;
 };
 
+#include <drm/drm_irq.h>
+
 #define DRM_SWITCH_POWER_ON 0
 #define DRM_SWITCH_POWER_OFF 1
 #define DRM_SWITCH_POWER_CHANGING 2
@@ -933,56 +906,6 @@ void drm_clflush_virt_range(void *addr, unsigned long length);
  * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
  */
 
-                               /* IRQ support (drm_irq.h) */
-extern int drm_irq_install(struct drm_device *dev, int irq);
-extern int drm_irq_uninstall(struct drm_device *dev);
-
-extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
-extern int drm_wait_vblank(struct drm_device *dev, void *data,
-                          struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
-extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
-                                         struct timeval *vblanktime);
-extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
-                                      struct drm_pending_vblank_event *e);
-extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
-                                     struct drm_pending_vblank_event *e);
-extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
-extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
-extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
-extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
-extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
-extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
-extern void drm_vblank_cleanup(struct drm_device *dev);
-extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
-
-extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
-                                                unsigned int pipe, int *max_error,
-                                                struct timeval *vblank_time,
-                                                unsigned flags,
-                                                const struct drm_display_mode *mode);
-extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
-                                           const struct drm_display_mode *mode);
-
-/**
- * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
- * @crtc: which CRTC's vblank waitqueue to retrieve
- *
- * This function returns a pointer to the vblank waitqueue for the CRTC.
- * Drivers can use this to implement vblank waits using wait_event() & co.
- */
-static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
-{
-       return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
-}
-
 /* Modesetting support */
 extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
 extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
index 9e6ab4a0c274bec4c5679cabf1500a701a8186f0..3edeaf88ebc030cafcf3314c35a18bffbd4df3bc 100644 (file)
@@ -782,7 +782,10 @@ struct drm_crtc {
        struct drm_plane *primary;
        struct drm_plane *cursor;
 
-       /* position inside the mode_config.list, can be used as a [] idx */
+       /**
+        * @index: Position inside the mode_config.list, can be used as an array
+        * index. It is invariant over the lifetime of the CRTC.
+        */
        unsigned index;
 
        /* position of cursor plane on crtc */
@@ -1209,7 +1212,10 @@ struct drm_encoder {
        char *name;
        int encoder_type;
 
-       /* position inside the mode_config.list, can be used as a [] idx */
+       /**
+        * @index: Position inside the mode_config.list, can be used as an array
+        * index. It is invariant over the lifetime of the encoder.
+        */
        unsigned index;
 
        uint32_t possible_crtcs;
@@ -1240,7 +1246,6 @@ struct drm_encoder {
  * @head: list management
  * @base: base KMS object
  * @name: human readable name, can be overwritten by the driver
- * @connector_id: compacted connector id useful indexing arrays
  * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
  * @connector_type_id: index into connector type enum
  * @interlace_allowed: can this connector handle interlaced modes?
@@ -1297,7 +1302,15 @@ struct drm_connector {
        struct drm_mode_object base;
 
        char *name;
-       int connector_id;
+
+       /**
+        * @index: Compacted connector index, which matches the position inside
+        * the mode_config.list for drivers not supporting hot-add/removing. Can
+        * be used as an array index. It is invariant over the lifetime of the
+        * connector.
+        */
+       unsigned index;
+
        int connector_type;
        int connector_type_id;
        bool interlace_allowed;
@@ -1710,7 +1723,10 @@ struct drm_plane {
 
        enum drm_plane_type type;
 
-       /* position inside the mode_config.list, can be used as a [] idx */
+       /**
+        * @index: Position inside the mode_config.list, can be used as an array
+        * index. It is invariant over the lifetime of the plane.
+        */
        unsigned index;
 
        const struct drm_plane_helper_funcs *helper_private;
@@ -2318,8 +2334,6 @@ struct drm_mode_config_funcs {
  * @fb_lock: mutex to protect fb state and lists
  * @num_fb: number of fbs available
  * @fb_list: list of framebuffers available
- * @num_connector: number of connectors on this device
- * @connector_list: list of connector objects
  * @num_encoder: number of encoders on this device
  * @encoder_list: list of encoder objects
  * @num_overlay_plane: number of overlay planes on this device
@@ -2341,18 +2355,8 @@ struct drm_mode_config_funcs {
  * @property_blob_list: list of all the blob property objects
  * @blob_lock: mutex for blob property allocation and management
  * @*_property: core property tracking
- * @degamma_lut_property: LUT used to convert the framebuffer's colors to linear
- *     gamma
- * @degamma_lut_size_property: size of the degamma LUT as supported by the
- *     driver (read-only)
- * @ctm_property: Matrix used to convert colors after the lookup in the
- *     degamma LUT
- * @gamma_lut_property: LUT used to convert the colors, after the CSC matrix, to
- *     the gamma space of the connected screen (read-only)
- * @gamma_lut_size_property: size of the gamma LUT as supported by the driver
  * @preferred_depth: preferred RGB pixel depth, used by fb helpers
  * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
- * @async_page_flip: does this device support async flips on the primary plane?
  * @cursor_width: hint to userspace for max cursor width
  * @cursor_height: hint to userspace for max cursor height
  * @helper_private: mid-layer private data
@@ -2394,8 +2398,17 @@ struct drm_mode_config {
        int num_fb;
        struct list_head fb_list;
 
+       /**
+        * @num_connector: Number of connectors on this device.
+        */
        int num_connector;
+       /**
+        * @connector_ida: ID allocator for connector indices.
+        */
        struct ida connector_ida;
+       /**
+        * @connector_list: List of connector objects.
+        */
        struct list_head connector_list;
        int num_encoder;
        struct list_head encoder_list;
@@ -2430,64 +2443,238 @@ struct drm_mode_config {
 
        /* pointers to standard properties */
        struct list_head property_blob_list;
+       /**
+        * @edid_property: Default connector property to hold the EDID of the
+        * currently connected sink, if any.
+        */
        struct drm_property *edid_property;
+       /**
+        * @dpms_property: Default connector property to control the
+        * connector's DPMS state.
+        */
        struct drm_property *dpms_property;
+       /**
+        * @path_property: Default connector property to hold the DP MST path
+        * for the port.
+        */
        struct drm_property *path_property;
+       /**
+        * @tile_property: Default connector property to store the tile
+        * position of a tiled screen, for sinks which need to be driven with
+        * multiple CRTCs.
+        */
        struct drm_property *tile_property;
+       /**
+        * @plane_type_property: Default plane property to differentiate
+        * CURSOR, PRIMARY and OVERLAY legacy uses of planes.
+        */
        struct drm_property *plane_type_property;
+       /**
+        * @rotation_property: Optional property for planes or CRTCs to specify
+        * rotation.
+        */
        struct drm_property *rotation_property;
+       /**
+        * @prop_src_x: Default atomic plane property for the plane source
+        * position in the connected &drm_framebuffer.
+        */
        struct drm_property *prop_src_x;
+       /**
+        * @prop_src_y: Default atomic plane property for the plane source
+        * position in the connected &drm_framebuffer.
+        */
        struct drm_property *prop_src_y;
+       /**
+        * @prop_src_w: Default atomic plane property for the plane source
+        * position in the connected &drm_framebuffer.
+        */
        struct drm_property *prop_src_w;
+       /**
+        * @prop_src_h: Default atomic plane property for the plane source
+        * position in the connected &drm_framebuffer.
+        */
        struct drm_property *prop_src_h;
+       /**
+        * @prop_crtc_x: Default atomic plane property for the plane destination
+        * position in the &drm_crtc it is being shown on.
+        */
        struct drm_property *prop_crtc_x;
+       /**
+        * @prop_crtc_y: Default atomic plane property for the plane destination
+        * position in the &drm_crtc it is being shown on.
+        */
        struct drm_property *prop_crtc_y;
+       /**
+        * @prop_crtc_w: Default atomic plane property for the plane destination
+        * position in the &drm_crtc it is being shown on.
+        */
        struct drm_property *prop_crtc_w;
+       /**
+        * @prop_crtc_h: Default atomic plane property for the plane destination
+        * position in the &drm_crtc it is being shown on.
+        */
        struct drm_property *prop_crtc_h;
+       /**
+        * @prop_fb_id: Default atomic plane property to specify the
+        * &drm_framebuffer.
+        */
        struct drm_property *prop_fb_id;
+       /**
+        * @prop_crtc_id: Default atomic plane property to specify the
+        * &drm_crtc.
+        */
        struct drm_property *prop_crtc_id;
+       /**
+        * @prop_active: Default atomic CRTC property to control the active
+        * state, which is the simplified implementation for DPMS in atomic
+        * drivers.
+        */
        struct drm_property *prop_active;
+       /**
+        * @prop_mode_id: Default atomic CRTC property to set the mode for a
+        * CRTC. A 0 mode implies that the CRTC is entirely disabled - all
+        * connectors must be off and active must be set to disabled, too.
+        */
        struct drm_property *prop_mode_id;
 
-       /* DVI-I properties */
+       /**
+        * @dvi_i_subconnector_property: Optional DVI-I property to
+        * differentiate between analog or digital mode.
+        */
        struct drm_property *dvi_i_subconnector_property;
+       /**
+        * @dvi_i_select_subconnector_property: Optional DVI-I property to
+        * select between analog or digital mode.
+        */
        struct drm_property *dvi_i_select_subconnector_property;
 
-       /* TV properties */
+       /**
+        * @tv_subconnector_property: Optional TV property to differentiate
+        * between different TV connector types.
+        */
        struct drm_property *tv_subconnector_property;
+       /**
+        * @tv_select_subconnector_property: Optional TV property to select
+        * between different TV connector types.
+        */
        struct drm_property *tv_select_subconnector_property;
+       /**
+        * @tv_mode_property: Optional TV property to select
+        * the output TV mode.
+        */
        struct drm_property *tv_mode_property;
+       /**
+        * @tv_left_margin_property: Optional TV property to set the left
+        * margin.
+        */
        struct drm_property *tv_left_margin_property;
+       /**
+        * @tv_right_margin_property: Optional TV property to set the right
+        * margin.
+        */
        struct drm_property *tv_right_margin_property;
+       /**
+        * @tv_top_margin_property: Optional TV property to set the top
+        * margin.
+        */
        struct drm_property *tv_top_margin_property;
+       /**
+        * @tv_bottom_margin_property: Optional TV property to set the bottom
+        * margin.
+        */
        struct drm_property *tv_bottom_margin_property;
+       /**
+        * @tv_brightness_property: Optional TV property to set the
+        * brightness.
+        */
        struct drm_property *tv_brightness_property;
+       /**
+        * @tv_contrast_property: Optional TV property to set the
+        * contrast.
+        */
        struct drm_property *tv_contrast_property;
+       /**
+        * @tv_flicker_reduction_property: Optional TV property to control the
+        * flicker reduction mode.
+        */
        struct drm_property *tv_flicker_reduction_property;
+       /**
+        * @tv_overscan_property: Optional TV property to control the overscan
+        * setting.
+        */
        struct drm_property *tv_overscan_property;
+       /**
+        * @tv_saturation_property: Optional TV property to set the
+        * saturation.
+        */
        struct drm_property *tv_saturation_property;
+       /**
+        * @tv_hue_property: Optional TV property to set the hue.
+        */
        struct drm_property *tv_hue_property;
 
-       /* Optional properties */
+       /**
+        * @scaling_mode_property: Optional connector property to control the
+        * upscaling, mostly used for built-in panels.
+        */
        struct drm_property *scaling_mode_property;
+       /**
+        * @aspect_ratio_property: Optional connector property to control the
+        * HDMI infoframe aspect ratio setting.
+        */
        struct drm_property *aspect_ratio_property;
+       /**
+        * @dirty_info_property: Optional connector property to give userspace a
+        * hint that the DIRTY_FB ioctl should be used.
+        */
        struct drm_property *dirty_info_property;
 
-       /* Optional color correction properties */
+       /**
+        * @degamma_lut_property: Optional CRTC property to set the LUT used to
+        * convert the framebuffer's colors to linear gamma.
+        */
        struct drm_property *degamma_lut_property;
+       /**
+        * @degamma_lut_size_property: Optional CRTC property for the size of
+        * the degamma LUT as supported by the driver (read-only).
+        */
        struct drm_property *degamma_lut_size_property;
+       /**
+        * @ctm_property: Optional CRTC property to set the
+        * matrix used to convert colors after the lookup in the
+        * degamma LUT.
+        */
        struct drm_property *ctm_property;
+       /**
+        * @gamma_lut_property: Optional CRTC property to set the LUT used to
+        * convert the colors, after the CTM matrix, to the gamma space of the
+        * connected screen.
+        */
        struct drm_property *gamma_lut_property;
+       /**
+        * @gamma_lut_size_property: Optional CRTC property for the size of the
+        * gamma LUT as supported by the driver (read-only).
+        */
        struct drm_property *gamma_lut_size_property;
 
-       /* properties for virtual machine layout */
+       /**
+        * @suggested_x_property: Optional connector property with a hint for
+        * the position of the output on the host's screen.
+        */
        struct drm_property *suggested_x_property;
+       /**
+        * @suggested_y_property: Optional connector property with a hint for
+        * the position of the output on the host's screen.
+        */
        struct drm_property *suggested_y_property;
 
        /* dumb ioctl parameters */
        uint32_t preferred_depth, prefer_shadow;
 
-       /* whether async page flip is supported or not */
+       /**
+        * @async_page_flip: Does this device support async flips on the primary
+        * plane?
+        */
        bool async_page_flip;
 
        /**
@@ -2584,12 +2771,9 @@ void drm_connector_unregister(struct drm_connector *connector);
 extern void drm_connector_cleanup(struct drm_connector *connector);
 static inline unsigned drm_connector_index(struct drm_connector *connector)
 {
-       return connector->connector_id;
+       return connector->index;
 }
 
-/* helpers to {un}register all connectors from sysfs for device */
-extern void drm_connector_unregister_all(struct drm_device *dev);
-
 extern __printf(5, 6)
 int drm_encoder_init(struct drm_device *dev,
                     struct drm_encoder *encoder,
index 4d85cf2874af35f0c2cd069663972e1ecd4f8e89..72dee1213268100402fdd91313c353b709507ef8 100644 (file)
@@ -747,7 +747,14 @@ struct drm_dp_aux {
        struct mutex hw_mutex;
        ssize_t (*transfer)(struct drm_dp_aux *aux,
                            struct drm_dp_aux_msg *msg);
-       unsigned i2c_nack_count, i2c_defer_count;
+       /**
+        * @i2c_nack_count: Counts I2C NACKs, used for DP validation.
+        */
+       unsigned i2c_nack_count;
+       /**
+        * @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
+        */
+       unsigned i2c_defer_count;
 };
 
 ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
index fdb47051d5492a4db126cc525667bd181343d2cd..003207670597095a7a5e7b3b06b177992fbd6e40 100644 (file)
@@ -87,7 +87,15 @@ struct drm_dp_mst_port {
        struct drm_connector *connector;
        struct drm_dp_mst_topology_mgr *mgr;
 
-       struct edid *cached_edid; /* for DP logical ports - make tiling work */
+       /**
+        * @cached_edid: For DP logical ports - makes tiling work by ensuring
+        * that the EDID for all connectors is read immediately.
+        */
+       struct edid *cached_edid;
+       /**
+        * @has_audio: Tracks whether the sink connector to this port is
+        * audio-capable.
+        */
        bool has_audio;
 };
 
@@ -397,71 +405,154 @@ struct drm_dp_payload {
 
 /**
  * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
- * @dev: device pointer for adding i2c devices etc.
- * @cbs: callbacks for connector addition and destruction.
- * @max_dpcd_transaction_bytes - maximum number of bytes to read/write in one go.
- * @aux: aux channel for the DP connector.
- * @max_payloads: maximum number of payloads the GPU can generate.
- * @conn_base_id: DRM connector ID this mgr is connected to.
- * @down_rep_recv: msg receiver state for down replies.
- * @up_req_recv: msg receiver state for up requests.
- * @lock: protects mst state, primary, dpcd.
- * @mst_state: if this manager is enabled for an MST capable port.
- * @mst_primary: pointer to the primary branch device.
- * @dpcd: cache of DPCD for primary port.
- * @pbn_div: PBN to slots divisor.
  *
  * This struct represents the toplevel displayport MST topology manager.
  * There should be one instance of this for every MST capable DP connector
  * on the GPU.
  */
 struct drm_dp_mst_topology_mgr {
-
+       /**
+        * @dev: device pointer for adding i2c devices etc.
+        */
        struct device *dev;
+       /**
+        * @cbs: callbacks for connector addition and destruction.
+        */
        const struct drm_dp_mst_topology_cbs *cbs;
+       /**
+        * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
+        * in one go.
+        */
        int max_dpcd_transaction_bytes;
-       struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
+       /**
+        * @aux: AUX channel for the DP MST connector this topology mgr is
+        * controlling.
+        */
+       struct drm_dp_aux *aux;
+       /**
+        * @max_payloads: maximum number of payloads the GPU can generate.
+        */
        int max_payloads;
+       /**
+        * @conn_base_id: DRM connector ID this mgr is connected to. Only used
+        * to build the MST connector path value.
+        */
        int conn_base_id;
 
-       /* only ever accessed from the workqueue - which should be serialised */
+       /**
+        * @down_rep_recv: Message receiver state for down replies. This and
+        * @up_req_recv are only ever accessed from the work item, which is
+        * serialised.
+        */
        struct drm_dp_sideband_msg_rx down_rep_recv;
+       /**
+        * @up_req_recv: Message receiver state for up requests. This and
+        * @down_rep_recv are only ever accessed from the work item, which is
+        * serialised.
+        */
        struct drm_dp_sideband_msg_rx up_req_recv;
 
-       /* pointer to info about the initial MST device */
-       struct mutex lock; /* protects mst_state + primary + dpcd */
+       /**
+        * @lock: protects mst state, primary, dpcd.
+        */
+       struct mutex lock;
 
+       /**
+        * @mst_state: If this manager is enabled for an MST capable port. False
+        * if no MST sink/branch device is connected.
+        */
        bool mst_state;
+       /**
+        * @mst_primary: Pointer to the primary/first branch device.
+        */
        struct drm_dp_mst_branch *mst_primary;
 
+       /**
+        * @dpcd: Cache of DPCD for primary port.
+        */
        u8 dpcd[DP_RECEIVER_CAP_SIZE];
+       /**
+        * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
+        */
        u8 sink_count;
+       /**
+        * @pbn_div: PBN to slots divisor.
+        */
        int pbn_div;
+       /**
+        * @total_slots: Total slots that can be allocated.
+        */
        int total_slots;
+       /**
+        * @avail_slots: Still available slots that can be allocated.
+        */
        int avail_slots;
+       /**
+        * @total_pbn: Total PBN count.
+        */
        int total_pbn;
 
-       /* messages to be transmitted */
-       /* qlock protects the upq/downq and in_progress,
-          the mstb tx_slots and txmsg->state once they are queued */
+       /**
+        * @qlock: protects @tx_msg_downq, the tx_slots in struct
+        * &drm_dp_mst_branch and txmsg->state once they are queued.
+        */
        struct mutex qlock;
+       /**
+        * @tx_msg_downq: List of pending down replies.
+        */
        struct list_head tx_msg_downq;
-       bool tx_down_in_progress;
 
-       /* payload info + lock for it */
+       /**
+        * @payload_lock: Protects payload information.
+        */
        struct mutex payload_lock;
+       /**
+        * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
+        * VCPI structure itself is embedded into the corresponding
+        * &drm_dp_mst_port structure.
+        */
        struct drm_dp_vcpi **proposed_vcpis;
+       /**
+        * @payloads: Array of payloads.
+        */
        struct drm_dp_payload *payloads;
+       /**
+        * @payload_mask: Elements of @payloads actually in use. Since
+        * reallocation of active outputs isn't possible, gaps can be created by
+        * disabling outputs out of order compared to how they've been enabled.
+        */
        unsigned long payload_mask;
+       /**
+        * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
+        */
        unsigned long vcpi_mask;
 
+       /**
+        * @tx_waitq: Wait queue for blocking until a pending message
+        * transaction completes.
+        */
        wait_queue_head_t tx_waitq;
+       /**
+        * @work: Probe work.
+        */
        struct work_struct work;
-
+       /**
+        * @tx_work: Sideband transmit worker. This can nest within the main
+        * @work worker for each transaction @work launches.
+        */
        struct work_struct tx_work;
 
+       /**
+        * @destroy_connector_list: List of connectors to be destroyed.
+        */
        struct list_head destroy_connector_list;
+       /**
+        * @destroy_connector_lock: Protects @destroy_connector_list.
+        */
        struct mutex destroy_connector_lock;
+       /**
+        * @destroy_connector_work: Work item to destroy connectors. Needed to
+        * avoid locking inversion.
+        */
        struct work_struct destroy_connector_work;
 };
 
diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h
new file mode 100644 (file)
index 0000000..2401b14
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2016 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_IRQ_H_
+#define _DRM_IRQ_H_
+
+#include <linux/seqlock.h>
+
+/**
+ * struct drm_pending_vblank_event - pending vblank event tracking
+ */
+struct drm_pending_vblank_event {
+       /**
+        * @base: Base structure for tracking pending DRM events.
+        */
+       struct drm_pending_event base;
+       /**
+        * @pipe: drm_crtc_index() of the &drm_crtc this event is for.
+        */
+       unsigned int pipe;
+       /**
+        * @event: Actual event which will be sent to userspace.
+        */
+       struct drm_event_vblank event;
+};
+
+/**
+ * struct drm_vblank_crtc - vblank tracking for a CRTC
+ *
+ * This structure tracks the vblank state for one CRTC.
+ *
+ * Note that for historical reasons - the vblank handling code is still shared
+ * with legacy/non-kms drivers - this is a free-standing structure not directly
+ * connected to struct &drm_crtc. But all public interface functions take
+ * a struct &drm_crtc to hide this implementation detail.
+ */
+struct drm_vblank_crtc {
+       /**
+        * @dev: Pointer to the &drm_device.
+        */
+       struct drm_device *dev;
+       /**
+        * @queue: Wait queue for vblank waiters.
+        */
+       wait_queue_head_t queue;
+       /**
+        * @disable_timer: Disable timer for the delayed vblank disabling
+        * hysteresis logic. Vblank disabling is controlled through the
+        * drm_vblank_offdelay module option and the setting of the
+        * max_vblank_count value in the &drm_device structure.
+        */
+       struct timer_list disable_timer;
+
+       /**
+        * @seqlock: Protect vblank count and time.
+        */
+       seqlock_t seqlock;
+
+       /**
+        * @count: Current software vblank counter.
+        */
+       u32 count;
+       /**
+        * @time: Vblank timestamp corresponding to @count.
+        */
+       struct timeval time;
+
+       /**
+        * @refcount: Number of users/waiters of the vblank interrupt. Only when
+        * this refcount reaches 0 can the hardware interrupt be disabled using
+        * @disable_timer.
+        */
+       atomic_t refcount;
+       /**
+        * @last: Protected by dev->vbl_lock, used for wraparound handling.
+        */
+       u32 last;
+       /**
+        * @inmodeset: Tracks whether the vblank is disabled due to a modeset.
+        * For legacy drivers, bit 2 additionally tracks whether a temporary
+        * extra vblank reference has been acquired to paper over the
+        * hardware counter resetting/jumping. KMS drivers should instead just
+        * call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly
+        * save and restore the vblank count.
+        */
+       unsigned int inmodeset;
+       /**
+        * @pipe: drm_crtc_index() of the &drm_crtc corresponding to this
+        * structure.
+        */
+       unsigned int pipe;
+       /**
+        * @framedur_ns: Frame/Field duration in ns, used by
+        * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+        * drm_calc_timestamping_constants().
+        */
+       int framedur_ns;
+       /**
+        * @linedur_ns: Line duration in ns, used by
+        * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+        * drm_calc_timestamping_constants().
+        */
+       int linedur_ns;
+       /**
+        * @enabled: Tracks the enabling state of the corresponding &drm_crtc to
+        * avoid double-disabling and hence corrupting saved state. Needed by
+        * drivers not using atomic KMS, since those might go through their CRTC
+        * disabling functions multiple times.
+        */
+       bool enabled;
+};
+
+extern int drm_irq_install(struct drm_device *dev, int irq);
+extern int drm_irq_uninstall(struct drm_device *dev);
+
+extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
+extern int drm_wait_vblank(struct drm_device *dev, void *data,
+                          struct drm_file *filp);
+extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
+extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
+extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
+                                         struct timeval *vblanktime);
+extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+                                      struct drm_pending_vblank_event *e);
+extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+                                     struct drm_pending_vblank_event *e);
+extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
+extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
+extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
+extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
+extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
+extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
+extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
+extern void drm_vblank_cleanup(struct drm_device *dev);
+extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
+extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
+
+extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+                                                unsigned int pipe, int *max_error,
+                                                struct timeval *vblank_time,
+                                                unsigned flags,
+                                                const struct drm_display_mode *mode);
+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+                                           const struct drm_display_mode *mode);
+
+/**
+ * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
+ * @crtc: which CRTC's vblank waitqueue to retrieve
+ *
+ * This function returns a pointer to the vblank waitqueue for the CRTC.
+ * Drivers can use this to implement vblank waits using wait_event() and related
+ * functions.
+ */
+static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
+{
+       return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
+}
+
+#endif
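As a usage note (not part of this patch): a driver could pair drm_crtc_vblank_waitqueue() with wait_event_timeout(), roughly as in this hypothetical sketch. The helper name and the 100 ms timeout are invented, and a vblank reference is assumed to be held via drm_crtc_vblank_get():

#include <drm/drmP.h>	/* pulls in the vblank helpers of this era */

/* Hypothetical driver helper: wait for the software vblank count on @crtc
 * to advance. Assumes the caller already holds a vblank reference. */
static int example_wait_next_vblank(struct drm_crtc *crtc)
{
	u32 last = drm_crtc_vblank_count(crtc);
	long ret;

	ret = wait_event_timeout(*drm_crtc_vblank_waitqueue(crtc),
				 drm_crtc_vblank_count(crtc) != last,
				 msecs_to_jiffies(100));
	return ret ? 0 : -ETIMEDOUT;
}

This mirrors what drm_wait_one_vblank() does internally, just expressed through the exported waitqueue accessor.
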
index 97aaf5c3d45b8406acb46e175e442aa80e6a9b1f..6f2c59887ba6d920527475416650976247a2794a 100644 (file)
@@ -316,6 +316,20 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
  */
 extern int ttm_bo_wait(struct ttm_buffer_object *bo,
                       bool interruptible, bool no_wait);
+
+/**
+ * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
+ *
+ * @placement:  Proposed placement for the buffer object
+ * @mem:  The struct ttm_mem_reg indicating the region where the bo resides
+ * @new_flags: Describes compatible placement found
+ *
+ * Returns true if the placement is compatible
+ */
+extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
+                             struct ttm_mem_reg *mem,
+                             uint32_t *new_flags);
+
 /**
  * ttm_bo_validate
  *
index e2ebe6666e2beb7fc28d234341413140f67fcfba..4348d6d5877a213b95c4b4e546287c9032b44a1a 100644 (file)
@@ -627,6 +627,15 @@ extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
  */
 extern void ttm_tt_destroy(struct ttm_tt *ttm);
 
+/**
+ * ttm_tt_unbind:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind a struct ttm_tt.
+ */
+extern void ttm_tt_unbind(struct ttm_tt *ttm);
+
 /**
  * ttm_tt_swapin:
  *
index fe389ac3148915416c4fe7dba63a6b5159a3bc3d..92e7e97ca8ff0f95ef9dba95b3e7078dd8c73049 100644 (file)
 #ifndef __ASM_ARM_KVM_PMU_H
 #define __ASM_ARM_KVM_PMU_H
 
-#ifdef CONFIG_KVM_ARM_PMU
-
 #include <linux/perf_event.h>
 #include <asm/perf_event.h>
 
 #define ARMV8_PMU_CYCLE_IDX            (ARMV8_PMU_MAX_COUNTERS - 1)
 
+#ifdef CONFIG_KVM_ARM_PMU
+
 struct kvm_pmc {
        u8 idx; /* index into the pmu->pmc array */
        struct perf_event *perf_event;
index 961a417d641e5221264e75b4d987fc3015607d41..e38e3fc13ea8764a66d4c84a6dea1bee3f4e630b 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/sched.h>
 #include <linux/ptrace.h>
 #include <uapi/linux/audit.h>
-#include <linux/tty.h>
 
 #define AUDIT_INO_UNSET ((unsigned long)-1)
 #define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -348,23 +347,6 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
        return tsk->sessionid;
 }
 
-static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
-{
-       struct tty_struct *tty = NULL;
-       unsigned long flags;
-
-       spin_lock_irqsave(&tsk->sighand->siglock, flags);
-       if (tsk->signal)
-               tty = tty_kref_get(tsk->signal->tty);
-       spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
-       return tty;
-}
-
-static inline void audit_put_tty(struct tty_struct *tty)
-{
-       tty_kref_put(tty);
-}
-
 extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
 extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
 extern void __audit_bprm(struct linux_binprm *bprm);
@@ -522,12 +504,6 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
 {
        return -1;
 }
-static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
-{
-       return NULL;
-}
-static inline void audit_put_tty(struct tty_struct *tty)
-{ }
 static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
 { }
 static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
index e6b41f42602ba7cc32e2945e15309d06f3028f95..3db25df396cbb88d2135874818f4fb38a71b2eee 100644 (file)
@@ -159,6 +159,7 @@ struct bcma_host_ops {
 #define BCMA_CORE_DEFAULT              0xFFF
 
 #define BCMA_MAX_NR_CORES              16
+#define BCMA_CORE_SIZE                 0x1000
 
 /* Chip IDs of PCIe devices */
 #define BCMA_CHIP_ID_BCM4313   0x4313
index 8ee27b8afe81c5d45b66f7b629c976ed8138d26a..0de4de6dd43e09aae40a72500b0e5a2fb9e89c1e 100644 (file)
@@ -111,6 +111,31 @@ enum bpf_access_type {
        BPF_WRITE = 2
 };
 
+/* types of values stored in eBPF registers */
+enum bpf_reg_type {
+       NOT_INIT = 0,            /* nothing was written into register */
+       UNKNOWN_VALUE,           /* reg doesn't contain a valid pointer */
+       PTR_TO_CTX,              /* reg points to bpf_context */
+       CONST_PTR_TO_MAP,        /* reg points to struct bpf_map */
+       PTR_TO_MAP_VALUE,        /* reg points to map element value */
+       PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
+       FRAME_PTR,               /* reg == frame_pointer */
+       PTR_TO_STACK,            /* reg == frame_pointer + imm */
+       CONST_IMM,               /* constant integer value */
+
+       /* PTR_TO_PACKET represents:
+        * skb->data
+        * skb->data + imm
+        * skb->data + (u16) var
+        * skb->data + (u16) var + imm
+        * if (range > 0) then [ptr, ptr + range - off) is safe to access
+        * if (id > 0) means that some 'var' was added
+        * if (off > 0) means that 'imm' was added
+        */
+       PTR_TO_PACKET,
+       PTR_TO_PACKET_END,       /* skb->data + headlen */
+};
+
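For context, a minimal sketch (not from this change) of the bounds check the verifier's PTR_TO_PACKET tracking demands in a tc/cls_bpf program before any direct packet access; the function name is hypothetical and the casts follow the usual __sk_buff convention:

#include <linux/bpf.h>
#include <linux/if_ether.h>

/* Sketch: prove [data, data + sizeof(struct ethhdr)) is in range before
 * dereferencing, which is what gives the access a valid PTR_TO_PACKET range. */
static inline int example_is_ipv4(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct ethhdr *eth = data;

	if (data + sizeof(*eth) > data_end)	/* range check the verifier needs */
		return 0;
	return eth->h_proto == __constant_htons(ETH_P_IP);
}
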
 struct bpf_prog;
 
 struct bpf_verifier_ops {
@@ -120,7 +145,8 @@ struct bpf_verifier_ops {
        /* return true if 'size' wide access at offset 'off' within bpf_context
         * with 'type' (read or write) is allowed
         */
-       bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
+       bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
+                               enum bpf_reg_type *reg_type);
 
        u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
                                  int src_reg, int ctx_off,
@@ -238,6 +264,10 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 static inline void bpf_prog_put(struct bpf_prog *prog)
 {
 }
+
+static inline void bpf_prog_put_rcu(struct bpf_prog *prog)
+{
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 /* verifier prototypes for helper functions called from eBPF programs */
index 6fc31ef1da2d8e9efe5d91b8e4349ed4fe0dd809..8f74f3d61894a4ad58b5cc3457f68f318a84c970 100644 (file)
@@ -467,7 +467,11 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 }
 #endif /* CONFIG_DEBUG_SET_MODULE_RONX */
 
-int sk_filter(struct sock *sk, struct sk_buff *skb);
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
+static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
+{
+       return sk_filter_trim_cap(sk, skb, 1);
+}
 
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
 void bpf_prog_free(struct bpf_prog *fp);
index 419fb9e03447aff8aef55934e89bbd844a28d7e7..f0a7a0320300bae6eca05e27a2083f80ed54282e 100644 (file)
@@ -94,7 +94,7 @@ static inline int split_huge_page(struct page *page)
 void deferred_split_huge_page(struct page *page);
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-               unsigned long address, bool freeze);
+               unsigned long address, bool freeze, struct page *page);
 
 #define split_huge_pmd(__vma, __pmd, __address)                                \
        do {                                                            \
@@ -102,7 +102,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                if (pmd_trans_huge(*____pmd)                            \
                                        || pmd_devmap(*____pmd))        \
                        __split_huge_pmd(__vma, __pmd, __address,       \
-                                               false);                 \
+                                               false, NULL);           \
        }  while (0)
 
 
index 7c27fa1030e873d1841f3e291e9fc806992967e3..feb04ea20f11cb6e1a39fc93f7ab9d054907de99 100644 (file)
@@ -52,6 +52,12 @@ struct sock *inet_diag_find_one_icsk(struct net *net,
 
 int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
 
+void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
+
+int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
+                            struct inet_diag_msg *r, int ext,
+                            struct user_namespace *user_ns);
+
 extern int  inet_diag_register(const struct inet_diag_handler *handler);
 extern void inet_diag_unregister(const struct inet_diag_handler *handler);
 #endif /* _INET_DIAG_H_ */
index a805474df4abd8c70c83bdbd3b383ad9c02eca1c..56e6069d245271539f14bf34c204122665e28ab6 100644 (file)
@@ -97,6 +97,11 @@ enum mem_cgroup_events_target {
 #define MEM_CGROUP_ID_SHIFT    16
 #define MEM_CGROUP_ID_MAX      USHRT_MAX
 
+struct mem_cgroup_id {
+       int id;
+       atomic_t ref;
+};
+
 struct mem_cgroup_stat_cpu {
        long count[MEMCG_NR_STAT];
        unsigned long events[MEMCG_NR_EVENTS];
@@ -172,6 +177,9 @@ enum memcg_kmem_state {
 struct mem_cgroup {
        struct cgroup_subsys_state css;
 
+       /* Private memcg ID. Used to ID objects that outlive the cgroup */
+       struct mem_cgroup_id id;
+
        /* Accounted resources */
        struct page_counter memory;
        struct page_counter swap;
@@ -330,22 +338,9 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
        if (mem_cgroup_disabled())
                return 0;
 
-       return memcg->css.id;
-}
-
-/**
- * mem_cgroup_from_id - look up a memcg from an id
- * @id: the id to look up
- *
- * Caller must hold rcu_read_lock() and use css_tryget() as necessary.
- */
-static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
-{
-       struct cgroup_subsys_state *css;
-
-       css = css_from_id(id, &memory_cgrp_subsys);
-       return mem_cgroup_from_css(css);
+       return memcg->id.id;
 }
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
 
 /**
  * parent_mem_cgroup - find the accounting parent of a memcg
index c18a4c19d6fcedcd66adebdb45c383af09930408..ce9230af09c20c0498e1111fdf148b8f6406b30e 100644 (file)
@@ -171,7 +171,7 @@ static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,
 static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
                                      unsigned reg_cnt, unsigned char *val)
 {
-       int ret;
+       int ret = 0;
        int i;
 
        for (i = 0; i < reg_cnt; i++) {
index 80dec87a94f8431024d88a6303f1f9189b2114e6..d46a0e7f144df536bfba29325904a2a0bf8c7cf7 100644 (file)
@@ -466,6 +466,7 @@ enum {
 enum {
        MLX4_INTERFACE_STATE_UP         = 1 << 0,
        MLX4_INTERFACE_STATE_DELETION   = 1 << 1,
+       MLX4_INTERFACE_STATE_SHUTDOWN   = 1 << 2,
 };
 
 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
index 80776d0c52dc9c48b7a02842caacbf9699e73507..fd72ecf0ce9fe55e3e0b1be1bc1ea6231c4378f2 100644 (file)
@@ -629,6 +629,7 @@ struct mlx5_cmd_work_ent {
        void                   *uout;
        int                     uout_size;
        mlx5_cmd_cbk_t          callback;
+       struct delayed_work     cb_timeout_work;
        void                   *context;
        int                     idx;
        struct completion       done;
index 9aa49a05fe389214c46b6ea48577c813d525a874..25aa03b51c4e1e2f6fa1a9d257493dc62098f982 100644 (file)
@@ -251,7 +251,8 @@ do {                                                                        \
        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
        if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&        \
            net_ratelimit())                                            \
-               __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__);    \
+               __dynamic_pr_debug(&descriptor, pr_fmt(fmt),            \
+                                  ##__VA_ARGS__);                      \
 } while (0)
 #elif defined(DEBUG)
 #define net_dbg_ratelimited(fmt, ...)                          \
index f45929ce815725d868261e9a2585ac53d0c8f128..da4b33bea9828857a28ab9060cbfe2d491cd3058 100644 (file)
@@ -4145,6 +4145,13 @@ static inline void netif_keep_dst(struct net_device *dev)
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
 }
 
+/* return true if dev can't cope with mtu frames that need vlan tag insertion */
+static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
+{
+       /* TODO: reserve and use an additional IFF bit, if we get more users */
+       return dev->priv_flags & IFF_MACSEC;
+}
+
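As an illustration (not part of this change; the helper name is invented), a VLAN device could cap its MTU against such a parent like so:

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* Sketch: reserve VLAN_HLEN on parents that can't take oversized frames. */
static unsigned int example_vlan_max_mtu(struct net_device *real_dev)
{
	return real_dev->mtu -
	       (netif_reduces_vlan_mtu(real_dev) ? VLAN_HLEN : 0);
}
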
 extern struct pernet_operations __net_initdata loopback_net_ops;
 
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
index 5b5a80cc59265882813cddba568b0875797e5e6b..c818772d9f9d13538309226a89894b03a78c1aa4 100644 (file)
@@ -43,10 +43,8 @@ struct posix_acl_entry {
 };
 
 struct posix_acl {
-       union {
-               atomic_t                a_refcount;
-               struct rcu_head         a_rcu;
-       };
+       atomic_t                a_refcount;
+       struct rcu_head         a_rcu;
        unsigned int            a_count;
        struct posix_acl_entry  a_entries[0];
 };
index 908b67c847cd656489f0a679422852e7c845d18f..c038ae36b10e7580645520d443d5faa8a792e645 100644 (file)
@@ -464,6 +464,8 @@ static inline bool pwm_can_sleep(struct pwm_device *pwm)
 
 static inline void pwm_apply_args(struct pwm_device *pwm)
 {
+       struct pwm_state state = { };
+
        /*
         * PWM users calling pwm_apply_args() expect to have a fresh config
         * where the polarity and period are set according to pwm_args info.
@@ -476,18 +478,20 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
         * at startup (even if they are actually enabled), thus authorizing
         * polarity setting.
         *
-        * Instead of setting ->enabled to false, we call pwm_disable()
-        * before pwm_set_polarity() to ensure that everything is configured
-        * as expected, and the PWM is really disabled when the user request
-        * it.
+        * To fulfill this requirement, we apply a new state which disables
+        * the PWM device and sets the reference period and polarity config.
         *
         * Note that PWM users requiring a smooth handover between the
         * bootloader and the kernel (like critical regulators controlled by
         * PWM devices) will have to switch to the atomic API and avoid calling
         * pwm_apply_args().
         */
-       pwm_disable(pwm);
-       pwm_set_polarity(pwm, pwm->args.polarity);
+
+       state.enabled = false;
+       state.polarity = pwm->args.polarity;
+       state.period = pwm->args.period;
+
+       pwm_apply_state(pwm, &state);
 }
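For comparison, a hedged sketch of a consumer driving the atomic API directly; the helper name and the period/duty values are invented:

#include <linux/pwm.h>

/* Hypothetical consumer snippet using the atomic PWM API. */
static int example_configure_pwm(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);	/* start from the current state */
	state.period = 1000000;		/* 1 ms period, in ns (example value) */
	state.duty_cycle = 500000;	/* 50% duty (example value) */
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}
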
 
 struct pwm_lookup {
index 6ae8cb4a61d39969283a382076c8cf261a77bfa6..6c876a63558d71c4ff3a957fba45cfc3b2bbbc3a 100644 (file)
@@ -49,6 +49,7 @@ struct qed_start_vport_params {
        bool drop_ttl0;
        u8 vport_id;
        u16 mtu;
+       bool clear_stats;
 };
 
 struct qed_stop_rxq_params {
index cb4b7e8cee81a40cbfedf0a781c7f701ed9e6ee0..eca6f626c16e7d513489341327588846e1239ffb 100644 (file)
@@ -407,6 +407,7 @@ static inline __must_check
 void **radix_tree_iter_retry(struct radix_tree_iter *iter)
 {
        iter->next_index = iter->index;
+       iter->tags = 0;
        return NULL;
 }
 
index ec0306ce7b92ab6f0e4bb420347ca9301b1355e3..45a4abeb6acba0193fffd134176a956950b53a4a 100644 (file)
@@ -84,8 +84,8 @@ static inline struct reset_control *__devm_reset_control_get(
 #endif /* CONFIG_RESET_CONTROLLER */
 
 /**
- * reset_control_get - Lookup and obtain an exclusive reference to a
- *                     reset controller.
+ * reset_control_get_exclusive - Lookup and obtain an exclusive reference
+ *                               to a reset controller.
  * @dev: device to be reset by the controller
  * @id: reset line name
  *
@@ -98,8 +98,8 @@ static inline struct reset_control *__devm_reset_control_get(
  *
  * Use of id names is optional.
  */
-static inline struct reset_control *__must_check reset_control_get(
-                                       struct device *dev, const char *id)
+static inline struct reset_control *
+__must_check reset_control_get_exclusive(struct device *dev, const char *id)
 {
 #ifndef CONFIG_RESET_CONTROLLER
        WARN_ON(1);
@@ -107,12 +107,6 @@ static inline struct reset_control *__must_check reset_control_get(
        return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
 }
 
-static inline struct reset_control *reset_control_get_optional(
-                                       struct device *dev, const char *id)
-{
-       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
-}
-
 /**
  * reset_control_get_shared - Lookup and obtain a shared reference to a
  *                            reset controller.
@@ -141,9 +135,21 @@ static inline struct reset_control *reset_control_get_shared(
        return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
 }
 
+static inline struct reset_control *reset_control_get_optional_exclusive(
+                                       struct device *dev, const char *id)
+{
+       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
+}
+
+static inline struct reset_control *reset_control_get_optional_shared(
+                                       struct device *dev, const char *id)
+{
+       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
+}
+
 /**
- * of_reset_control_get - Lookup and obtain an exclusive reference to a
- *                        reset controller.
+ * of_reset_control_get_exclusive - Lookup and obtain an exclusive reference
+ *                                  to a reset controller.
  * @node: device to be reset by the controller
  * @id: reset line name
  *
@@ -151,15 +157,41 @@ static inline struct reset_control *reset_control_get_shared(
  *
  * Use of id names is optional.
  */
-static inline struct reset_control *of_reset_control_get(
+static inline struct reset_control *of_reset_control_get_exclusive(
                                struct device_node *node, const char *id)
 {
        return __of_reset_control_get(node, id, 0, 0);
 }
 
 /**
- * of_reset_control_get_by_index - Lookup and obtain an exclusive reference to
- *                                 a reset controller by index.
+ * of_reset_control_get_shared - Lookup and obtain a shared reference
+ *                               to a reset controller.
+ * @node: device to be reset by the controller
+ * @id: reset line name
+ *
+ * When a reset-control is shared, the behavior of reset_control_assert /
+ * deassert is changed, the reset-core will keep track of a deassert_count
+ * and only (re-)assert the reset after reset_control_assert has been called
+ * as many times as reset_control_deassert was called. Also see the remark
+ * about shared reset-controls in the reset_control_assert docs.
+ *
+ * Calling reset_control_assert without first calling reset_control_deassert
+ * is not allowed on a shared reset control. Calling reset_control_reset is
+ * also not allowed on a shared reset control.
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *of_reset_control_get_shared(
+                               struct device_node *node, const char *id)
+{
+       return __of_reset_control_get(node, id, 0, 1);
+}
+
+/**
+ * of_reset_control_get_exclusive_by_index - Lookup and obtain an exclusive
+ *                                           reference to a reset controller
+ *                                           by index.
  * @node: device to be reset by the controller
  * @index: index of the reset controller
  *
@@ -167,49 +199,60 @@ static inline struct reset_control *of_reset_control_get(
  * in whatever order. Returns a struct reset_control or IS_ERR() condition
  * containing errno.
  */
-static inline struct reset_control *of_reset_control_get_by_index(
+static inline struct reset_control *of_reset_control_get_exclusive_by_index(
                                        struct device_node *node, int index)
 {
        return __of_reset_control_get(node, NULL, index, 0);
 }
 
 /**
- * devm_reset_control_get - resource managed reset_control_get()
- * @dev: device to be reset by the controller
- * @id: reset line name
+ * of_reset_control_get_shared_by_index - Lookup and obtain a shared
+ *                                        reference to a reset controller
+ *                                        by index.
+ * @node: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * When a reset-control is shared, the behavior of reset_control_assert /
+ * deassert is changed, the reset-core will keep track of a deassert_count
+ * and only (re-)assert the reset after reset_control_assert has been called
+ * as many times as reset_control_deassert was called. Also see the remark
+ * about shared reset-controls in the reset_control_assert docs.
+ *
+ * Calling reset_control_assert without first calling reset_control_deassert
+ * is not allowed on a shared reset control. Calling reset_control_reset is
+ * also not allowed on a shared reset control.
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
  *
- * Managed reset_control_get(). For reset controllers returned from this
- * function, reset_control_put() is called automatically on driver detach.
- * See reset_control_get() for more information.
+ * This is to be used to perform a list of resets for a device or power domain
+ * in whatever order.
  */
-static inline struct reset_control *__must_check devm_reset_control_get(
-                                       struct device *dev, const char *id)
-{
-#ifndef CONFIG_RESET_CONTROLLER
-       WARN_ON(1);
-#endif
-       return __devm_reset_control_get(dev, id, 0, 0);
-}
-
-static inline struct reset_control *devm_reset_control_get_optional(
-                                       struct device *dev, const char *id)
+static inline struct reset_control *of_reset_control_get_shared_by_index(
+                                       struct device_node *node, int index)
 {
-       return __devm_reset_control_get(dev, id, 0, 0);
+       return __of_reset_control_get(node, NULL, index, 1);
 }
 
 /**
- * devm_reset_control_get_by_index - resource managed reset_control_get
+ * devm_reset_control_get_exclusive - resource managed
+ *                                    reset_control_get_exclusive()
  * @dev: device to be reset by the controller
- * @index: index of the reset controller
+ * @id: reset line name
  *
- * Managed reset_control_get(). For reset controllers returned from this
- * function, reset_control_put() is called automatically on driver detach.
- * See reset_control_get() for more information.
+ * Managed reset_control_get_exclusive(). For reset controllers returned
+ * from this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_get_exclusive() for more information.
  */
-static inline struct reset_control *devm_reset_control_get_by_index(
-                                       struct device *dev, int index)
+static inline struct reset_control *
+__must_check devm_reset_control_get_exclusive(struct device *dev,
+                                             const char *id)
 {
-       return __devm_reset_control_get(dev, NULL, index, 0);
+#ifndef CONFIG_RESET_CONTROLLER
+       WARN_ON(1);
+#endif
+       return __devm_reset_control_get(dev, id, 0, 0);
 }
 
 /**
@@ -227,6 +270,36 @@ static inline struct reset_control *devm_reset_control_get_shared(
        return __devm_reset_control_get(dev, id, 0, 1);
 }
 
+static inline struct reset_control *devm_reset_control_get_optional_exclusive(
+                                       struct device *dev, const char *id)
+{
+       return __devm_reset_control_get(dev, id, 0, 0);
+}
+
+static inline struct reset_control *devm_reset_control_get_optional_shared(
+                                       struct device *dev, const char *id)
+{
+       return __devm_reset_control_get(dev, id, 0, 1);
+}
+
+/**
+ * devm_reset_control_get_exclusive_by_index - resource managed
+ *                                             reset_control_get_exclusive()
+ * @dev: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * Managed reset_control_get_exclusive(). For reset controllers returned from
+ * this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_get_exclusive() for more information.
+ */
+static inline struct reset_control *
+devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
+{
+       return __devm_reset_control_get(dev, NULL, index, 0);
+}
+
 /**
  * devm_reset_control_get_shared_by_index - resource managed
  * reset_control_get_shared
@@ -237,10 +310,60 @@ static inline struct reset_control *devm_reset_control_get_shared(
  * this function, reset_control_put() is called automatically on driver detach.
  * See reset_control_get_shared() for more information.
  */
-static inline struct reset_control *devm_reset_control_get_shared_by_index(
-                                       struct device *dev, int index)
+static inline struct reset_control *
+devm_reset_control_get_shared_by_index(struct device *dev, int index)
 {
        return __devm_reset_control_get(dev, NULL, index, 1);
 }
 
+/*
+ * TEMPORARY calls to use during transition:
+ *
+ *   of_reset_control_get() => of_reset_control_get_exclusive()
+ *
+ * These inline function calls will be removed once all consumers
+ * have been moved over to the new explicit API.
+ */
+static inline struct reset_control *reset_control_get(
+                               struct device *dev, const char *id)
+{
+       return reset_control_get_exclusive(dev, id);
+}
+
+static inline struct reset_control *reset_control_get_optional(
+                                       struct device *dev, const char *id)
+{
+       return reset_control_get_optional_exclusive(dev, id);
+}
+
+static inline struct reset_control *of_reset_control_get(
+                               struct device_node *node, const char *id)
+{
+       return of_reset_control_get_exclusive(node, id);
+}
+
+static inline struct reset_control *of_reset_control_get_by_index(
+                               struct device_node *node, int index)
+{
+       return of_reset_control_get_exclusive_by_index(node, index);
+}
+
+static inline struct reset_control *devm_reset_control_get(
+                               struct device *dev, const char *id)
+{
+       return devm_reset_control_get_exclusive(dev, id);
+}
+
+static inline struct reset_control *devm_reset_control_get_optional(
+                               struct device *dev, const char *id)
+{
+       return devm_reset_control_get_optional_exclusive(dev, id);
+}
+
+static inline struct reset_control *devm_reset_control_get_by_index(
+                               struct device *dev, int index)
+{
+       return devm_reset_control_get_exclusive_by_index(dev, index);
+}
 #endif
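A rough usage sketch of the new naming (not from this patch; the device, the "core"/"bus" line names, and the helper are hypothetical): exclusive controls may still be pulsed, shared ones only see balanced assert/deassert calls:

#include <linux/reset.h>
#include <linux/err.h>

/* Hypothetical probe snippet contrasting the two flavours. */
static int example_probe(struct device *dev)
{
	struct reset_control *excl, *shared;

	excl = devm_reset_control_get_exclusive(dev, "core");
	if (IS_ERR(excl))
		return PTR_ERR(excl);
	reset_control_reset(excl);		/* pulsing: exclusive only */

	shared = devm_reset_control_get_shared(dev, "bus");
	if (IS_ERR(shared))
		return PTR_ERR(shared);
	return reset_control_deassert(shared);	/* refcounted deassert */
}
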
index 49eb4f8ebac9636a394dbe097532c70a08e056d7..2b0fad83683f793959fbe330045abb7c684edfcf 100644 (file)
@@ -158,7 +158,7 @@ struct anon_vma *page_get_anon_vma(struct page *page);
 /*
  * rmap interfaces called when adding or removing pte of page
  */
-void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_move_anon_rmap(struct page *, struct vm_area_struct *);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *,
                unsigned long, bool);
 void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
index ee38a41274759f279be1c0752a7fab63fac517c8..f39b37180c414deb6d71c0ab5d674f89958630c0 100644 (file)
@@ -1062,6 +1062,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
 }
 
 void __skb_get_hash(struct sk_buff *skb);
+u32 __skb_get_hash_symmetric(struct sk_buff *skb);
 u32 skb_get_poff(const struct sk_buff *skb);
 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
                   const struct flow_keys *keys, int hlen);
@@ -2869,6 +2870,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
                skb->csum = csum_partial(start, len, skb->csum);
 }
 
+/**
+ *     skb_push_rcsum - push skb and update receive checksum
+ *     @skb: buffer to update
+ *     @len: length of data pushed
+ *
+ *     This function performs an skb_push on the packet and updates
+ *     the CHECKSUM_COMPLETE checksum.  It should be used on
+ *     receive path processing instead of skb_push unless you know
+ *     that the checksum difference is zero (e.g., a valid IP header)
+ *     or you are setting ip_summed to CHECKSUM_NONE.
+ */
+static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
+                                           unsigned int len)
+{
+       skb_push(skb, len);
+       skb_postpush_rcsum(skb, skb->data, len);
+       return skb->data;
+}
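A hedged sketch of the intended call pattern (hypothetical helper; ETH_HLEN comes from <linux/if_ether.h>): code that used skb_push() plus skb_postpush_rcsum() back to back can collapse into one call:

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Hypothetical receive-path snippet: restore a previously pulled Ethernet
 * header while keeping a CHECKSUM_COMPLETE skb->csum valid. */
static void example_repush_eth(struct sk_buff *skb)
{
	unsigned char *hdr = skb_push_rcsum(skb, ETH_HLEN);

	/* hdr points at the restored header; skb->csum now covers it again. */
	(void)hdr;
}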
+
 /**
  *     pskb_trim_rcsum - trim received skb and update checksum
  *     @skb: buffer to trim
index 4018b48f2b3b4f115802fba8b67fcc3cd113c773..a0596ca0e80ac77aeb0afa29648532ef51a5deae 100644 (file)
@@ -36,6 +36,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
 {
        switch (sk->sk_family) {
        case AF_INET:
+               if (sk->sk_type == SOCK_RAW)
+                       return SKNLGRP_NONE;
+
                switch (sk->sk_protocol) {
                case IPPROTO_TCP:
                        return SKNLGRP_INET_TCP_DESTROY;
@@ -45,6 +48,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
                        return SKNLGRP_NONE;
                }
        case AF_INET6:
+               if (sk->sk_type == SOCK_RAW)
+                       return SKNLGRP_NONE;
+
                switch (sk->sk_protocol) {
                case IPPROTO_TCP:
                        return SKNLGRP_INET6_TCP_DESTROY;
index 966889a20ea375a29fdba96ab2615babf073e8d5..e479033bd7829148de1a63928c6db99ed91295aa 100644 (file)
@@ -180,11 +180,11 @@ struct ehci_regs {
  * PORTSCx
  */
        /* HOSTPC: offset 0x84 */
-       u32             hostpc[1];      /* HOSTPC extension */
+       u32             hostpc[0];      /* HOSTPC extension */
 #define HOSTPC_PHCD    (1<<22)         /* Phy clock disable */
 #define HOSTPC_PSPD    (3<<25)         /* Port speed detection */
 
-       u32             reserved5[16];
+       u32             reserved5[17];
 
        /* USBMODE_EX: offset 0xc8 */
        u32             usbmode_ex;     /* USB Device mode extension */
index 791800ddd6d90de23d7b7acea7a91e500e646eda..6360c259da6d62cd3c4b99c858acd6db19eff401 100644 (file)
@@ -34,6 +34,9 @@
 
 #define BOND_DEFAULT_MIIMON    100
 
+#ifndef __long_aligned
+#define __long_aligned __attribute__((aligned((sizeof(long)))))
+#endif
 /*
  * Less bad way to call ioctl from within the kernel; this needs to be
  * done some other way to get the call out of interrupt context.
@@ -138,7 +141,9 @@ struct bond_params {
        struct reciprocal_value reciprocal_packets_per_slave;
        u16 ad_actor_sys_prio;
        u16 ad_user_port_key;
-       u8 ad_actor_system[ETH_ALEN];
+
+       /* 2 bytes of padding : see ether_addr_equal_64bits() */
+       u8 ad_actor_system[ETH_ALEN + 2];
 };
 
 struct bond_parm_tbl {
index 5dce30a6abe3e67091c5d14facd2750edd8426f0..7a54a31d1d4cf7988ff7bd3bf3015b4a8f23e359 100644 (file)
@@ -26,7 +26,7 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version);
 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
                                       u8 name_assign_type);
 int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-                    bool *csum_err, __be16 proto);
+                    bool *csum_err, __be16 proto, int nhs);
 
 static inline int gre_calc_hlen(__be16 o_flags)
 {
index 37165fba3741ac68e5a93a8a22473eae70361e45..08f36cd2b874b5493bc9b51c4071722591dcc321 100644 (file)
@@ -313,10 +313,9 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
        return min(dst->dev->mtu, IP_MAX_MTU);
 }
 
-static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
+static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+                                         const struct sk_buff *skb)
 {
-       struct sock *sk = skb->sk;
-
        if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
                bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
 
index dd78bea227c8b0baf2fd14eb878f26f265ac76e0..b6083c34ef0d4ed0d03f9c030307f335d3a4cba0 100644 (file)
@@ -284,6 +284,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
        return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
 }
 
+/* jiffies until ct expires, 0 if already expired */
+static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
+{
+       long timeout = (long)ct->timeout.expires - (long)jiffies;
+
+       return timeout > 0 ? timeout : 0;
+}
+
 struct kernel_param;
 
 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
index 092235458691fd5eadc09cab3266d100f7140dc6..f7c291ff4074510333888184b3b481c77eefc546 100644 (file)
@@ -167,6 +167,7 @@ struct nft_set_elem {
 
 struct nft_set;
 struct nft_set_iter {
+       u8              genmask;
        unsigned int    count;
        unsigned int    skip;
        int             err;
index 649d2a8c17fc36f04b4d317c41c33edd6cdfc8b7..ff5be7e8ddeae6f9d2f9eac889d7abbfbd396bbd 100644 (file)
@@ -1576,7 +1576,13 @@ static inline void sock_put(struct sock *sk)
  */
 void sock_gen_put(struct sock *sk);
 
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
+                    unsigned int trim_cap);
+static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+                                const int nested)
+{
+       return __sk_receive_skb(sk, skb, nested, 1);
+}
 
 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
 {
index 985619a593230a6532017e4f61dfaa945b04a08f..1d8e158241da742eb845ac30bab4292cec33d479 100644 (file)
@@ -60,7 +60,7 @@ struct switchdev_attr {
                struct netdev_phys_item_id ppid;        /* PORT_PARENT_ID */
                u8 stp_state;                           /* PORT_STP_STATE */
                unsigned long brport_flags;             /* PORT_BRIDGE_FLAGS */
-               u32 ageing_time;                        /* BRIDGE_AGEING_TIME */
+               clock_t ageing_time;                    /* BRIDGE_AGEING_TIME */
                bool vlan_filtering;                    /* BRIDGE_VLAN_FILTERING */
        } u;
 };
index dc9a09aefb3368cec313d31af7cbc60d76c54fe8..c55facd17b7ec0b4c38ecf7e3690118056a93567 100644 (file)
@@ -36,7 +36,7 @@ struct tcf_meta_ops {
        int     (*encode)(struct sk_buff *, void *, struct tcf_meta_info *);
        int     (*decode)(struct sk_buff *, void *, u16 len);
        int     (*get)(struct sk_buff *skb, struct tcf_meta_info *mi);
-       int     (*alloc)(struct tcf_meta_info *, void *);
+       int     (*alloc)(struct tcf_meta_info *, void *, gfp_t);
        void    (*release)(struct tcf_meta_info *);
        int     (*validate)(void *val, int len);
        struct module   *owner;
@@ -48,8 +48,8 @@ int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi);
 int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
 int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
                        const void *dval);
-int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval);
-int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval);
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
 int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
 int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
 int ife_validate_meta_u32(void *val, int len);
diff --git a/include/uapi/drm/vgem_drm.h b/include/uapi/drm/vgem_drm.h
new file mode 100644 (file)
index 0000000..bf66f5d
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _UAPI_VGEM_DRM_H_
+#define _UAPI_VGEM_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+#define DRM_VGEM_FENCE_ATTACH  0x1
+#define DRM_VGEM_FENCE_SIGNAL  0x2
+
+#define DRM_IOCTL_VGEM_FENCE_ATTACH    DRM_IOWR( DRM_COMMAND_BASE + DRM_VGEM_FENCE_ATTACH, struct drm_vgem_fence_attach)
+#define DRM_IOCTL_VGEM_FENCE_SIGNAL    DRM_IOW( DRM_COMMAND_BASE + DRM_VGEM_FENCE_SIGNAL, struct drm_vgem_fence_signal)
+
+struct drm_vgem_fence_attach {
+       __u32 handle;
+       __u32 flags;
+#define VGEM_FENCE_WRITE       0x1
+       __u32 out_fence;
+       __u32 pad;
+};
+
+struct drm_vgem_fence_signal {
+       __u32 fence;
+       __u32 flags;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _UAPI_VGEM_DRM_H_ */
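From userspace, the new ioctl could be exercised roughly as below (a sketch: the fd is assumed to be an open vgem DRM node, handle a valid GEM handle, and the helper name is invented):

#include <sys/ioctl.h>
#include <drm/vgem_drm.h>

/* Hypothetical userspace snippet attaching a write fence to a vgem BO. */
static int example_attach_fence(int fd, __u32 handle, __u32 *out_fence)
{
	struct drm_vgem_fence_attach attach = {
		.handle = handle,
		.flags = VGEM_FENCE_WRITE,
	};
	int ret = ioctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach);

	if (ret == 0)
		*out_fence = attach.out_fence;
	return ret;
}
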
index 8bdae34d1f9add25d968182c6540c431d994c7ce..ec10cfef166afb3a7cdc777ade3903e076d30376 100644 (file)
@@ -245,6 +245,7 @@ endif
 header-y += hw_breakpoint.h
 header-y += l2tp.h
 header-y += libc-compat.h
+header-y += lirc.h
 header-y += limits.h
 header-y += llc.h
 header-y += loop.h
index 5974fae54e12c534826d323b35917956275fd5a6..27e17363263abcc0ee9a6459ecd247de07b71a86 100644 (file)
  *
  *  7.24
  *  - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support
+ *
+ *  7.25
+ *  - add FUSE_PARALLEL_DIROPS
  */
 
 #ifndef _LINUX_FUSE_H
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 24
+#define FUSE_KERNEL_MINOR_VERSION 25
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -234,6 +237,7 @@ struct fuse_file_lock {
  * FUSE_ASYNC_DIO: asynchronous direct I/O submission
  * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
  * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
+ * FUSE_PARALLEL_DIROPS: allow parallel lookups and readdir
  */
 #define FUSE_ASYNC_READ                (1 << 0)
 #define FUSE_POSIX_LOCKS       (1 << 1)
@@ -253,6 +257,7 @@ struct fuse_file_lock {
 #define FUSE_ASYNC_DIO         (1 << 15)
 #define FUSE_WRITEBACK_CACHE   (1 << 16)
 #define FUSE_NO_OPEN_SUPPORT   (1 << 17)
+#define FUSE_PARALLEL_DIROPS    (1 << 18)
 
 /**
  * CUSE INIT request/reply flags
index 87cf351bab03998d51817f7085d2d1f7abda0b43..d6d071fc3c568249bf70f24602f95770eb1643df 100644 (file)
 #define KEY_KBDINPUTASSIST_ACCEPT              0x264
 #define KEY_KBDINPUTASSIST_CANCEL              0x265
 
+/* Diagonal movement keys */
+#define KEY_RIGHT_UP                   0x266
+#define KEY_RIGHT_DOWN                 0x267
+#define KEY_LEFT_UP                    0x268
+#define KEY_LEFT_DOWN                  0x269
+
+#define KEY_ROOT_MENU                  0x26a /* Show Device's Root Menu */
+/* Show Top Menu of the Media (e.g. DVD) */
+#define KEY_MEDIA_TOP_MENU             0x26b
+#define KEY_NUMERIC_11                 0x26c
+#define KEY_NUMERIC_12                 0x26d
+/*
+ * Toggle Audio Description: refers to an audio service that helps blind and
+ * visually impaired consumers understand the action in a program. Note: in
+ * some countries this is referred to as "Video Description".
+ */
+#define KEY_AUDIO_DESC                 0x26e
+#define KEY_3D_MODE                    0x26f
+#define KEY_NEXT_FAVORITE              0x270
+#define KEY_STOP_RECORD                        0x271
+#define KEY_PAUSE_RECORD               0x272
+#define KEY_VOD                                0x273 /* Video on Demand */
+#define KEY_UNMUTE                     0x274
+#define KEY_FASTREVERSE                        0x275
+#define KEY_SLOWREVERSE                        0x276
+/*
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+#define KEY_DATA                       0x277
+
 #define BTN_TRIGGER_HAPPY              0x2c0
 #define BTN_TRIGGER_HAPPY1             0x2c0
 #define BTN_TRIGGER_HAPPY2             0x2c1
 #define SW_ROTATE_LOCK         0x0c  /* set = rotate locked/disabled */
 #define SW_LINEIN_INSERT       0x0d  /* set = inserted */
 #define SW_MUTE_DEVICE         0x0e  /* set = device disabled */
+#define SW_PEN_INSERTED                0x0f  /* set = pen inserted */
 #define SW_MAX                 0x0f
 #define SW_CNT                 (SW_MAX+1)
 
index 01113841190d3f9ee6c654b2f728d95b44a62bb3..c514941198173c0a73c19d5441867d5d7fa4fe76 100644 (file)
@@ -247,6 +247,7 @@ struct input_mask {
 #define BUS_ATARI              0x1B
 #define BUS_SPI                        0x1C
 #define BUS_RMI                        0x1D
+#define BUS_CEC                        0x1E
 
 /*
  * MT_TOOL types
index 1d973d2ba417247f4265d8dee6bf6566ae1f9047..cd26d7a0fd07094499f97568dc8ca4f50ae93c0f 100644 (file)
@@ -33,6 +33,7 @@ header-y += xt_NFLOG.h
 header-y += xt_NFQUEUE.h
 header-y += xt_RATEEST.h
 header-y += xt_SECMARK.h
+header-y += xt_SYNPROXY.h
 header-y += xt_TCPMSS.h
 header-y += xt_TCPOPTSTRIP.h
 header-y += xt_TEE.h
index 2d59fbaa93c621af56c8bc01e98445c4e54ae745..ca67e61d2a619e6a017e0d254844cbc49ab96629 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _XT_SYNPROXY_H
 #define _XT_SYNPROXY_H
 
+#include <linux/types.h>
+
 #define XT_SYNPROXY_OPT_MSS            0x01
 #define XT_SYNPROXY_OPT_WSCALE         0x02
 #define XT_SYNPROXY_OPT_SACK_PERM      0x04
index f755a602d4a176e006dc2cb5830d1c4812bac81f..c02d89777713b8772e47c2759570f4a838cb18e7 100644 (file)
@@ -1458,6 +1458,7 @@ config KALLSYMS_ALL
 
 config KALLSYMS_ABSOLUTE_PERCPU
        bool
+       depends on KALLSYMS
        default X86_64 && SMP
 
 config KALLSYMS_BASE_RELATIVE
index 22bb4f24f071df56dbddcb3f84fbf3ad2e8c63bc..8d528f9930dad639c22c797da715545db717ce6e 100644 (file)
@@ -1883,6 +1883,23 @@ out_null:
        audit_log_format(ab, " exe=(null)");
 }
 
+struct tty_struct *audit_get_tty(struct task_struct *tsk)
+{
+       struct tty_struct *tty = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tsk->sighand->siglock, flags);
+       if (tsk->signal)
+               tty = tty_kref_get(tsk->signal->tty);
+       spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+       return tty;
+}
+
+void audit_put_tty(struct tty_struct *tty)
+{
+       tty_kref_put(tty);
+}
+
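A minimal usage sketch for the new pair (assuming an already-started audit buffer ab): take the reference, format the name, then drop it. audit_put_tty() tolerates a NULL tty because tty_kref_put() does:

        struct tty_struct *tty = audit_get_tty(current);

        audit_log_format(ab, " tty=%s", tty ? tty_name(tty) : "(none)");
        audit_put_tty(tty);
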
 void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
 {
        const struct cred *cred;
index cbbe6bb6496ea934f86ffcfedffe971c76bbb692..a492f4c4e7106aa7c4c23e7bcb8e502477df6638 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/audit.h>
 #include <linux/skbuff.h>
 #include <uapi/linux/mqueue.h>
+#include <linux/tty.h>
 
 /* AUDIT_NAMES is the number of slots we reserve in the audit_context
  * for saving names from getname().  If we get more names we will allocate
@@ -262,6 +263,9 @@ extern struct audit_entry *audit_dupe_rule(struct audit_krule *old);
 extern void audit_log_d_path_exe(struct audit_buffer *ab,
                                 struct mm_struct *mm);
 
+extern struct tty_struct *audit_get_tty(struct task_struct *tsk);
+extern void audit_put_tty(struct tty_struct *tty);
+
 /* audit watch functions */
 #ifdef CONFIG_AUDIT_WATCH
 extern void audit_put_watch(struct audit_watch *watch);
index 62ab53d7619cfb249154276e40a38d1e69a906f6..2672d105cffcdaf7703d9f655e7338dd0b102cc0 100644 (file)
@@ -63,7 +63,6 @@
 #include <asm/unistd.h>
 #include <linux/security.h>
 #include <linux/list.h>
-#include <linux/tty.h>
 #include <linux/binfmts.h>
 #include <linux/highmem.h>
 #include <linux/syscalls.h>
@@ -1985,14 +1984,15 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
        if (!audit_enabled)
                return;
 
+       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
+       if (!ab)
+               return;
+
        uid = from_kuid(&init_user_ns, task_uid(current));
        oldloginuid = from_kuid(&init_user_ns, koldloginuid);
        loginuid = from_kuid(&init_user_ns, kloginuid),
        tty = audit_get_tty(current);
 
-       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
-       if (!ab)
-               return;
        audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
        audit_log_task_context(ab);
        audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d",
index 668e07903c8f1a3950c4e494eb7443748710f08c..eec9f90ba030410a5104991cfcd377400cb4bb7d 100644 (file)
  * are set to NOT_INIT to indicate that they are no longer readable.
  */
 
-/* types of values stored in eBPF registers */
-enum bpf_reg_type {
-       NOT_INIT = 0,            /* nothing was written into register */
-       UNKNOWN_VALUE,           /* reg doesn't contain a valid pointer */
-       PTR_TO_CTX,              /* reg points to bpf_context */
-       CONST_PTR_TO_MAP,        /* reg points to struct bpf_map */
-       PTR_TO_MAP_VALUE,        /* reg points to map element value */
-       PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
-       FRAME_PTR,               /* reg == frame_pointer */
-       PTR_TO_STACK,            /* reg == frame_pointer + imm */
-       CONST_IMM,               /* constant integer value */
-
-       /* PTR_TO_PACKET represents:
-        * skb->data
-        * skb->data + imm
-        * skb->data + (u16) var
-        * skb->data + (u16) var + imm
-        * if (range > 0) then [ptr, ptr + range - off) is safe to access
-        * if (id > 0) means that some 'var' was added
-        * if (off > 0) means that 'imm' was added
-        */
-       PTR_TO_PACKET,
-       PTR_TO_PACKET_END,       /* skb->data + headlen */
-};
-
 struct reg_state {
        enum bpf_reg_type type;
        union {
@@ -695,10 +670,10 @@ static int check_packet_access(struct verifier_env *env, u32 regno, int off,
 
 /* check access to 'struct bpf_context' fields */
 static int check_ctx_access(struct verifier_env *env, int off, int size,
-                           enum bpf_access_type t)
+                           enum bpf_access_type t, enum bpf_reg_type *reg_type)
 {
        if (env->prog->aux->ops->is_valid_access &&
-           env->prog->aux->ops->is_valid_access(off, size, t)) {
+           env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
                /* remember the offset of last byte accessed in ctx */
                if (env->prog->aux->max_ctx_offset < off + size)
                        env->prog->aux->max_ctx_offset = off + size;
@@ -798,21 +773,19 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
                        mark_reg_unknown_value(state->regs, value_regno);
 
        } else if (reg->type == PTR_TO_CTX) {
+               enum bpf_reg_type reg_type = UNKNOWN_VALUE;
+
                if (t == BPF_WRITE && value_regno >= 0 &&
                    is_pointer_value(env, value_regno)) {
                        verbose("R%d leaks addr into ctx\n", value_regno);
                        return -EACCES;
                }
-               err = check_ctx_access(env, off, size, t);
+               err = check_ctx_access(env, off, size, t, &reg_type);
                if (!err && t == BPF_READ && value_regno >= 0) {
                        mark_reg_unknown_value(state->regs, value_regno);
-                       if (off == offsetof(struct __sk_buff, data) &&
-                           env->allow_ptr_leaks)
+                       if (env->allow_ptr_leaks)
                                /* note that reg.[id|off|range] == 0 */
-                               state->regs[value_regno].type = PTR_TO_PACKET;
-                       else if (off == offsetof(struct __sk_buff, data_end) &&
-                                env->allow_ptr_leaks)
-                               state->regs[value_regno].type = PTR_TO_PACKET_END;
+                               state->regs[value_regno].type = reg_type;
                }
 
        } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
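
To make the new contract concrete, here is an abbreviated sketch of a program type's is_valid_access() callback under the changed signature; real callbacks also validate size and alignment. The caller pre-seeds *reg_type with UNKNOWN_VALUE (see above), so a callback only overrides it for pointer-producing fields:

static bool sk_filter_is_valid_access(int off, int size,
                                      enum bpf_access_type type,
                                      enum bpf_reg_type *reg_type)
{
        switch (off) {
        case offsetof(struct __sk_buff, data):
                *reg_type = PTR_TO_PACKET;
                break;
        case offsetof(struct __sk_buff, data_end):
                *reg_type = PTR_TO_PACKET_END;
                break;
        }
        /* size/alignment checks elided for brevity */
        return off >= 0 && off < sizeof(struct __sk_buff);
}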
index 86cb5c6e89320f28e17691c6d69e58c9dfde81fb..75c0ff00aca60d298062755539e83cbfeaffaaf2 100644 (file)
@@ -837,6 +837,8 @@ static void put_css_set_locked(struct css_set *cset)
 
 static void put_css_set(struct css_set *cset)
 {
+       unsigned long flags;
+
        /*
         * Ensure that the refcount doesn't hit zero while any readers
         * can see it. Similar to atomic_dec_and_lock(), but for an
@@ -845,9 +847,9 @@ static void put_css_set(struct css_set *cset)
        if (atomic_add_unless(&cset->refcount, -1, 1))
                return;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irqsave(&css_set_lock, flags);
        put_css_set_locked(cset);
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irqrestore(&css_set_lock, flags);
 }
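
The conversion across this file follows one rule, sketched here: paths that may already run with interrupts disabled save and restore the interrupt state, while known process-context paths use the cheaper fixed-disable variant:

        unsigned long flags;

        /* Caller context unknown (may nest inside an irq-off region). */
        spin_lock_irqsave(&css_set_lock, flags);
        /* ... touch css_set state ... */
        spin_unlock_irqrestore(&css_set_lock, flags);

        /* Known process context with interrupts enabled. */
        spin_lock_irq(&css_set_lock);
        /* ... */
        spin_unlock_irq(&css_set_lock);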
 
 /*
@@ -1070,11 +1072,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 
        /* First see if we already have a cgroup group that matches
         * the desired set */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        cset = find_existing_css_set(old_cset, cgrp, template);
        if (cset)
                get_css_set(cset);
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        if (cset)
                return cset;
@@ -1102,7 +1104,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
         * find_existing_css_set() */
        memcpy(cset->subsys, template, sizeof(cset->subsys));
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        /* Add reference counts and links from the new css_set. */
        list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
                struct cgroup *c = link->cgrp;
@@ -1128,7 +1130,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
                css_get(css);
        }
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        return cset;
 }
@@ -1192,7 +1194,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
         * Release all the links from cset_links to this hierarchy's
         * root cgroup
         */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
                list_del(&link->cset_link);
@@ -1200,7 +1202,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
                kfree(link);
        }
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        if (!list_empty(&root->root_list)) {
                list_del(&root->root_list);
@@ -1600,11 +1602,11 @@ static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
                ss->root = dst_root;
                css->cgroup = dcgrp;
 
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
                hash_for_each(css_set_table, i, cset, hlist)
                        list_move_tail(&cset->e_cset_node[ss->id],
                                       &dcgrp->e_csets[ss->id]);
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
 
                /* default hierarchy doesn't enable controllers by default */
                dst_root->subsys_mask |= 1 << ssid;
@@ -1640,10 +1642,10 @@ static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
        if (!buf)
                return -ENOMEM;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
        len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        if (len >= PATH_MAX)
                len = -ERANGE;
@@ -1897,7 +1899,7 @@ static void cgroup_enable_task_cg_lists(void)
 {
        struct task_struct *p, *g;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        if (use_task_css_set_links)
                goto out_unlock;
@@ -1922,8 +1924,12 @@ static void cgroup_enable_task_cg_lists(void)
                 * entry won't be deleted even though the process has exited.
                 * Do it while holding siglock so that we don't end up
                 * racing against cgroup_exit().
+                *
+                * Interrupts were already disabled while acquiring
+                * the css_set_lock, so we do not need to disable it
+                * again when acquiring the sighand->siglock here.
                 */
-               spin_lock_irq(&p->sighand->siglock);
+               spin_lock(&p->sighand->siglock);
                if (!(p->flags & PF_EXITING)) {
                        struct css_set *cset = task_css_set(p);
 
@@ -1932,11 +1938,11 @@ static void cgroup_enable_task_cg_lists(void)
                        list_add_tail(&p->cg_list, &cset->tasks);
                        get_css_set(cset);
                }
-               spin_unlock_irq(&p->sighand->siglock);
+               spin_unlock(&p->sighand->siglock);
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
 out_unlock:
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 }
 
 static void init_cgroup_housekeeping(struct cgroup *cgrp)
@@ -2043,13 +2049,13 @@ static int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
         * Link the root cgroup in this hierarchy into all the css_set
         * objects.
         */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        hash_for_each(css_set_table, i, cset, hlist) {
                link_css_set(&tmp_links, cset, root_cgrp);
                if (css_set_populated(cset))
                        cgroup_update_populated(root_cgrp, true);
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        BUG_ON(!list_empty(&root_cgrp->self.children));
        BUG_ON(atomic_read(&root->nr_cgrps) != 1);
@@ -2256,11 +2262,11 @@ out_mount:
                struct cgroup *cgrp;
 
                mutex_lock(&cgroup_mutex);
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
 
                cgrp = cset_cgroup_from_root(ns->root_cset, root);
 
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
                mutex_unlock(&cgroup_mutex);
 
                nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
@@ -2337,11 +2343,11 @@ char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
        char *ret;
 
        mutex_lock(&cgroup_mutex);
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
 
        return ret;
@@ -2369,7 +2375,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
        char *path = NULL;
 
        mutex_lock(&cgroup_mutex);
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
 
@@ -2382,7 +2388,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
                        path = buf;
        }
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
        return path;
 }
@@ -2557,7 +2563,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
         * the new cgroup.  There are no failure cases after here, so this
         * is the commit point.
         */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry(cset, &tset->src_csets, mg_node) {
                list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
                        struct css_set *from_cset = task_css_set(task);
@@ -2568,7 +2574,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
                        put_css_set_locked(from_cset);
                }
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        /*
         * Migration is committed, all target tasks are now on dst_csets.
@@ -2597,13 +2603,13 @@ out_cancel_attach:
                }
        } while_each_subsys_mask();
 out_release_tset:
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_splice_init(&tset->dst_csets, &tset->src_csets);
        list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
                list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
                list_del_init(&cset->mg_node);
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        return ret;
 }
 
@@ -2634,7 +2640,7 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
 
        lockdep_assert_held(&cgroup_mutex);
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
                cset->mg_src_cgrp = NULL;
                cset->mg_dst_cgrp = NULL;
@@ -2642,7 +2648,7 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
                list_del_init(&cset->mg_preload_node);
                put_css_set_locked(cset);
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 }
 
 /**
@@ -2783,7 +2789,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
         * already PF_EXITING could be freed from underneath us unless we
         * take an rcu_read_lock.
         */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        rcu_read_lock();
        task = leader;
        do {
@@ -2792,7 +2798,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
                        break;
        } while_each_thread(leader, task);
        rcu_read_unlock();
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        return cgroup_taskset_migrate(&tset, root);
 }
@@ -2816,7 +2822,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
                return -EBUSY;
 
        /* look up all src csets */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        rcu_read_lock();
        task = leader;
        do {
@@ -2826,7 +2832,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
                        break;
        } while_each_thread(leader, task);
        rcu_read_unlock();
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        /* prepare dst csets and commit */
        ret = cgroup_migrate_prepare_dst(&preloaded_csets);
@@ -2859,9 +2865,9 @@ static int cgroup_procs_write_permission(struct task_struct *task,
                struct cgroup *cgrp;
                struct inode *inode;
 
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
                cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
 
                while (!cgroup_is_descendant(dst_cgrp, cgrp))
                        cgrp = cgroup_parent(cgrp);
@@ -2962,9 +2968,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
                if (root == &cgrp_dfl_root)
                        continue;
 
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
                from_cgrp = task_cgroup_from_root(from, root);
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
 
                retval = cgroup_attach_task(from_cgrp, tsk, false);
                if (retval)
@@ -3080,7 +3086,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
        percpu_down_write(&cgroup_threadgroup_rwsem);
 
        /* look up all csses currently attached to @cgrp's subtree */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
                struct cgrp_cset_link *link;
 
@@ -3088,14 +3094,14 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
                        cgroup_migrate_add_src(link->cset, dsct,
                                               &preloaded_csets);
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        /* NULL dst indicates self on default hierarchy */
        ret = cgroup_migrate_prepare_dst(&preloaded_csets);
        if (ret)
                goto out_finish;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
                struct task_struct *task, *ntask;
 
@@ -3107,7 +3113,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
                list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
                        cgroup_taskset_add(task, &tset);
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        ret = cgroup_taskset_migrate(&tset, cgrp->root);
 out_finish:
@@ -3908,10 +3914,10 @@ static int cgroup_task_count(const struct cgroup *cgrp)
        int count = 0;
        struct cgrp_cset_link *link;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &cgrp->cset_links, cset_link)
                count += atomic_read(&link->cset->refcount);
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        return count;
 }
 
@@ -4249,7 +4255,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
 
        memset(it, 0, sizeof(*it));
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        it->ss = css->ss;
 
@@ -4262,7 +4268,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
 
        css_task_iter_advance_css_set(it);
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 }
 
 /**
@@ -4280,7 +4286,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
                it->cur_task = NULL;
        }
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        if (it->task_pos) {
                it->cur_task = list_entry(it->task_pos, struct task_struct,
@@ -4289,7 +4295,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
                css_task_iter_advance(it);
        }
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        return it->cur_task;
 }
@@ -4303,10 +4309,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
 void css_task_iter_end(struct css_task_iter *it)
 {
        if (it->cur_cset) {
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
                list_del(&it->iters_node);
                put_css_set_locked(it->cur_cset);
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
        }
 
        if (it->cur_task)
@@ -4338,10 +4344,10 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
        mutex_lock(&cgroup_mutex);
 
        /* all tasks in @from are being moved, all csets are source */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &from->cset_links, cset_link)
                cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        ret = cgroup_migrate_prepare_dst(&preloaded_csets);
        if (ret)
@@ -5063,6 +5069,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
        memset(css, 0, sizeof(*css));
        css->cgroup = cgrp;
        css->ss = ss;
+       css->id = -1;
        INIT_LIST_HEAD(&css->sibling);
        INIT_LIST_HEAD(&css->children);
        css->serial_nr = css_serial_nr_next++;
@@ -5150,7 +5157,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
 
        err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
        if (err < 0)
-               goto err_free_percpu_ref;
+               goto err_free_css;
        css->id = err;
 
        /* @css is ready to be brought online now, make it visible */
@@ -5174,9 +5181,6 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
 
 err_list_del:
        list_del_rcu(&css->sibling);
-       cgroup_idr_remove(&ss->css_idr, css->id);
-err_free_percpu_ref:
-       percpu_ref_exit(&css->refcnt);
 err_free_css:
        call_rcu(&css->rcu_head, css_free_rcu_fn);
        return ERR_PTR(err);
@@ -5451,10 +5455,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
         */
        cgrp->self.flags &= ~CSS_ONLINE;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &cgrp->cset_links, cset_link)
                link->cset->dead = true;
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        /* initiate massacre of all css's */
        for_each_css(css, ssid, cgrp)
@@ -5725,7 +5729,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                goto out;
 
        mutex_lock(&cgroup_mutex);
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        for_each_root(root) {
                struct cgroup_subsys *ss;
@@ -5778,7 +5782,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
 
        retval = 0;
 out_unlock:
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
        kfree(buf);
 out:
@@ -5923,13 +5927,13 @@ void cgroup_post_fork(struct task_struct *child)
        if (use_task_css_set_links) {
                struct css_set *cset;
 
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
                cset = task_css_set(current);
                if (list_empty(&child->cg_list)) {
                        get_css_set(cset);
                        css_set_move_task(child, NULL, cset, false);
                }
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
        }
 
        /*
@@ -5974,9 +5978,9 @@ void cgroup_exit(struct task_struct *tsk)
        cset = task_css_set(tsk);
 
        if (!list_empty(&tsk->cg_list)) {
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
                css_set_move_task(tsk, cset, NULL, false);
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
        } else {
                get_css_set(cset);
        }
@@ -6044,9 +6048,9 @@ static void cgroup_release_agent(struct work_struct *work)
        if (!pathbuf || !agentbuf)
                goto out;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        if (!path)
                goto out;
 
@@ -6306,12 +6310,12 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
                return ERR_PTR(-EPERM);
 
        mutex_lock(&cgroup_mutex);
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        cset = task_css_set(current);
        get_css_set(cset);
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
 
        new_ns = alloc_cgroup_ns();
@@ -6435,7 +6439,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
        if (!name_buf)
                return -ENOMEM;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        rcu_read_lock();
        cset = rcu_dereference(current->cgroups);
        list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
@@ -6446,7 +6450,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
                           c->root->hierarchy_id, name_buf);
        }
        rcu_read_unlock();
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        kfree(name_buf);
        return 0;
 }
@@ -6457,7 +6461,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
        struct cgroup_subsys_state *css = seq_css(seq);
        struct cgrp_cset_link *link;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
                struct css_set *cset = link->cset;
                struct task_struct *task;
@@ -6480,7 +6484,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
        overflow:
                seq_puts(seq, "  ...\n");
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        return 0;
 }
 
index d948e44c471ea89aa2953f7a687c6b5783bbdf0e..7b61887f7ccdf57fdcb3083574a56c5580110d73 100644 (file)
@@ -1201,6 +1201,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
                .teardown               = takedown_cpu,
                .cant_stop              = true,
        },
+#else
+       [CPUHP_BRINGUP_CPU] = { },
 #endif
 };
 
index 9c51ec3f0f440f35dda1bf9e9b3e02e421388df3..43d43a2d5811d548271c138c0e1ed31d541c19ad 100644 (file)
@@ -1678,12 +1678,33 @@ static bool is_orphaned_event(struct perf_event *event)
        return event->state == PERF_EVENT_STATE_DEAD;
 }
 
-static inline int pmu_filter_match(struct perf_event *event)
+static inline int __pmu_filter_match(struct perf_event *event)
 {
        struct pmu *pmu = event->pmu;
        return pmu->filter_match ? pmu->filter_match(event) : 1;
 }
 
+/*
+ * Check whether we should attempt to schedule an event group based on
+ * PMU-specific filtering. An event group can consist of HW and SW events,
+ * potentially with a SW leader, so we must check all of the filters to
+ * determine whether the group is schedulable.
+ */
+static inline int pmu_filter_match(struct perf_event *event)
+{
+       struct perf_event *child;
+
+       if (!__pmu_filter_match(event))
+               return 0;
+
+       list_for_each_entry(child, &event->sibling_list, group_entry) {
+               if (!__pmu_filter_match(child))
+                       return 0;
+       }
+
+       return 1;
+}
+
 static inline int
 event_filter_match(struct perf_event *event)
 {
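
A hedged userspace illustration (hypothetical helper, error handling omitted) of the situation the new check covers: a software leader with a hardware sibling, where every member's PMU filter must pass before the group can be scheduled:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical helper: open a SW-led group with a HW sibling. */
static int open_mixed_group(void)
{
        struct perf_event_attr sw = {
                .type = PERF_TYPE_SOFTWARE,
                .config = PERF_COUNT_SW_CPU_CLOCK,
        };
        struct perf_event_attr hw = {
                .type = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        int leader = syscall(__NR_perf_event_open, &sw, 0, -1, -1, 0);

        /* The sibling joins the leader's group via group_fd. */
        return syscall(__NR_perf_event_open, &hw, 0, -1, leader, 0);
}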
@@ -7529,7 +7550,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
        prog = event->tp_event->prog;
        if (prog) {
                event->tp_event->prog = NULL;
-               bpf_prog_put(prog);
+               bpf_prog_put_rcu(prog);
        }
 }
 
index e25e92fb44face315265d43d981b71e193693f2a..6a5c239c7669c5ad8e01565be5a6fd38ae7d4452 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/vmalloc.h>
 #include "gcov.h"
 
-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#if (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
 #define GCOV_COUNTERS                  10
 #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
 #define GCOV_COUNTERS                  9
index 51d7105f529a5527ee8c90ac789f08583027a373..97ee9ac7e97c7743249295fa0c4d2da32740bbf7 100644 (file)
@@ -5394,13 +5394,15 @@ void idle_task_exit(void)
 /*
  * Since this CPU is going 'away' for a while, fold any nr_active delta
  * we might have. Assumes we're called after migrate_tasks() so that the
- * nr_active count is stable.
+ * nr_active count is stable. We need to account for the teardown
+ * thread calling this function, so we pass adjust = 1 to the load
+ * calculation.
  *
  * Also see the comment "Global load-average calculations".
  */
 static void calc_load_migrate(struct rq *rq)
 {
-       long delta = calc_load_fold_active(rq);
+       long delta = calc_load_fold_active(rq, 1);
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);
 }
index bdcbeea90c950523b5276cfd1d8464ceef2d4fd8..c8c5d2d484249048cca7dd62fdfc38fbe7de2883 100644 (file)
@@ -735,8 +735,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
        }
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -2499,28 +2497,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-       long tg_weight;
+       long tg_weight, load, shares;
 
        /*
-        * Use this CPU's real-time load instead of the last load contribution
-        * as the updating of the contribution is delayed, and we will use
-        * the real-time load to calc the share. See update_tg_load_avg().
+        * This really should be: cfs_rq->avg.load_avg, but instead we use
+        * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+        * the shares for small weight interactive tasks.
         */
-       tg_weight = atomic_long_read(&tg->load_avg);
-       tg_weight -= cfs_rq->tg_load_avg_contrib;
-       tg_weight += cfs_rq->load.weight;
+       load = scale_load_down(cfs_rq->load.weight);
 
-       return tg_weight;
-}
-
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
-       long tg_weight, load, shares;
+       tg_weight = atomic_long_read(&tg->load_avg);
 
-       tg_weight = calc_tg_weight(tg, cfs_rq);
-       load = cfs_rq->load.weight;
+       /* Ensure tg_weight >= load */
+       tg_weight -= cfs_rq->tg_load_avg_contrib;
+       tg_weight += load;
 
        shares = (tg->shares * load);
        if (tg_weight)
@@ -2539,6 +2531,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
        return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                            unsigned long weight)
 {
@@ -4946,19 +4939,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                return wl;
 
        for_each_sched_entity(se) {
-               long w, W;
+               struct cfs_rq *cfs_rq = se->my_q;
+               long W, w = cfs_rq_load_avg(cfs_rq);
 
-               tg = se->my_q->tg;
+               tg = cfs_rq->tg;
 
                /*
                 * W = @wg + \Sum rw_j
                 */
-               W = wg + calc_tg_weight(tg, se->my_q);
+               W = wg + atomic_long_read(&tg->load_avg);
+
+               /* Ensure \Sum rw_j >= rw_i */
+               W -= cfs_rq->tg_load_avg_contrib;
+               W += w;
 
                /*
                 * w = rw_i + @wl
                 */
-               w = cfs_rq_load_avg(se->my_q) + wl;
+               w += wl;
 
                /*
                 * wl = S * s'_i; see (2)
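
A worked numeric reading of the new shares computation (illustrative values only, not taken from the diff):

        long tg_shares = 1024;  /* tg->shares                           */
        long load      = 512;   /* scale_load_down(cfs_rq->load.weight) */
        long tg_weight = 2048;  /* atomic_long_read(&tg->load_avg)      */

        tg_weight -= 400;       /* drop this cfs_rq's stale contribution */
        tg_weight += load;      /* clamp: guarantees tg_weight >= load   */
        /* tg_weight == 2160, so shares == 1024 * 512 / 2160 == 242      */
        long shares = tg_shares * load / tg_weight;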
index b0b93fd33af9e4bb4d61edcda77d3b761cb9b8de..a2d6eb71f06b80527b86dd99a83f6d4621265cec 100644 (file)
@@ -78,11 +78,11 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
        loads[2] = (avenrun[2] + offset) << shift;
 }
 
-long calc_load_fold_active(struct rq *this_rq)
+long calc_load_fold_active(struct rq *this_rq, long adjust)
 {
        long nr_active, delta = 0;
 
-       nr_active = this_rq->nr_running;
+       nr_active = this_rq->nr_running - adjust;
        nr_active += (long)this_rq->nr_uninterruptible;
 
        if (nr_active != this_rq->calc_load_active) {
@@ -188,7 +188,7 @@ void calc_load_enter_idle(void)
         * We're going into NOHZ mode, if there's any pending delta, fold it
         * into the pending idle delta.
         */
-       delta = calc_load_fold_active(this_rq);
+       delta = calc_load_fold_active(this_rq, 0);
        if (delta) {
                int idx = calc_load_write_idx();
 
@@ -389,7 +389,7 @@ void calc_global_load_tick(struct rq *this_rq)
        if (time_before(jiffies, this_rq->calc_load_update))
                return;
 
-       delta  = calc_load_fold_active(this_rq);
+       delta  = calc_load_fold_active(this_rq, 0);
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);
 
index 7cbeb92a1cb9361102b8514f612ee81be87fef10..898c0d2f18feb78e73746f0f3546b6f4de375dca 100644 (file)
@@ -28,7 +28,7 @@ extern unsigned long calc_load_update;
 extern atomic_long_t calc_load_tasks;
 
 extern void calc_global_load_tick(struct rq *this_rq);
-extern long calc_load_fold_active(struct rq *this_rq);
+extern long calc_load_fold_active(struct rq *this_rq, long adjust);
 
 #ifdef CONFIG_SMP
 extern void cpu_load_update_active(struct rq *this_rq);
index 1cafba860b08ceb6030fa2a1f237e3950a4e0aa7..39008d78927acb4f9a62f582cb7baba3f27620ee 100644 (file)
@@ -777,6 +777,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
                        timer->it.cpu.expires = 0;
                        sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                           &itp->it_value);
+                       return;
                } else {
                        cpu_timer_sample_group(timer->it_clock, p, &now);
                        unlock_task_sighand(p, &flags);
index 720b7bb01d43ae5f49761da3a34d8fce4e55dbc2..26f603da7e26867fc47835a8fcd3eca4121e2d86 100644 (file)
@@ -209,6 +209,10 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
            event->pmu->count)
                return -EINVAL;
 
+       if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
+                    event->attr.type != PERF_TYPE_RAW))
+               return -EINVAL;
+
        /*
         * we don't know if the function is run successfully by the
         * return value. It can be judged in other places, such as
@@ -349,7 +353,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
 }
 
 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
-static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
+static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+                                       enum bpf_reg_type *reg_type)
 {
        /* check bounds */
        if (off < 0 || off >= sizeof(struct pt_regs))
@@ -427,7 +432,8 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
        }
 }
 
-static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type)
+static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+                                   enum bpf_reg_type *reg_type)
 {
        if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
                return false;
index e1c0e996b5ae63175feb24d5d6a6c8b541b63287..97e7b793df35be3a1ce01d3bf22ed2dc2d28fd84 100644 (file)
@@ -4600,15 +4600,11 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
        if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
                return;
 
-       /* is @cpu the only online CPU? */
        cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
-       if (cpumask_weight(&cpumask) != 1)
-               return;
 
        /* as we're called from CPU_ONLINE, the following shouldn't fail */
        for_each_pool_worker(worker, pool)
-               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
-                                                 pool->attrs->cpumask) < 0);
+               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
 }
 
 /*
index 79bfe0e06907ac91875dddc9a3fbcf579b0c4d51..7bc04778f84dd6e2b68e4fadec74530f4bc64d57 100644 (file)
@@ -1009,8 +1009,6 @@ static void isolate_freepages(struct compact_control *cc)
                                block_end_pfn = block_start_pfn,
                                block_start_pfn -= pageblock_nr_pages,
                                isolate_start_pfn = block_start_pfn) {
-               unsigned long isolated;
-
                /*
                 * This can iterate a massively long zone without finding any
                 * suitable migration targets, so periodically check if we need
@@ -1034,36 +1032,30 @@ static void isolate_freepages(struct compact_control *cc)
                        continue;
 
                /* Found a block suitable for isolating free pages from. */
-               isolated = isolate_freepages_block(cc, &isolate_start_pfn,
-                                               block_end_pfn, freelist, false);
-               /* If isolation failed early, do not continue needlessly */
-               if (!isolated && isolate_start_pfn < block_end_pfn &&
-                   cc->nr_migratepages > cc->nr_freepages)
-                       break;
+               isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+                                       freelist, false);
 
                /*
-                * If we isolated enough freepages, or aborted due to async
-                * compaction being contended, terminate the loop.
-                * Remember where the free scanner should restart next time,
-                * which is where isolate_freepages_block() left off.
-                * But if it scanned the whole pageblock, isolate_start_pfn
-                * now points at block_end_pfn, which is the start of the next
-                * pageblock.
-                * In that case we will however want to restart at the start
-                * of the previous pageblock.
+                * If we isolated enough freepages, or aborted due to lock
+                * contention, terminate.
                 */
                if ((cc->nr_freepages >= cc->nr_migratepages)
                                                        || cc->contended) {
-                       if (isolate_start_pfn >= block_end_pfn)
+                       if (isolate_start_pfn >= block_end_pfn) {
+                               /*
+                                * Restart at previous pageblock if more
+                                * freepages can be isolated next time.
+                                */
                                isolate_start_pfn =
                                        block_start_pfn - pageblock_nr_pages;
+                       }
                        break;
-               } else {
+               } else if (isolate_start_pfn < block_end_pfn) {
                        /*
-                        * isolate_freepages_block() should not terminate
-                        * prematurely unless contended, or isolated enough
+                        * If isolation failed early, do not continue
+                        * needlessly.
                         */
-                       VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+                       break;
                }
        }
 
index 9ed58530f6957bef1e2e0fa166ed87085dcae523..343a2b7e57aa25f7d02709ff13ab8a5e47c68a87 100644 (file)
@@ -1624,14 +1624,9 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (next - addr != HPAGE_PMD_SIZE) {
                get_page(page);
                spin_unlock(ptl);
-               if (split_huge_page(page)) {
-                       put_page(page);
-                       unlock_page(page);
-                       goto out_unlocked;
-               }
+               split_huge_page(page);
                put_page(page);
                unlock_page(page);
-               ret = 1;
                goto out_unlocked;
        }
 
@@ -2989,7 +2984,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 }
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-               unsigned long address, bool freeze)
+               unsigned long address, bool freeze, struct page *page)
 {
        spinlock_t *ptl;
        struct mm_struct *mm = vma->vm_mm;
@@ -2997,8 +2992,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
        mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
        ptl = pmd_lock(mm, pmd);
+
+       /*
+        * If the caller asks us to set up migration entries, we need a
+        * page to check the pmd against. Otherwise we can end up
+        * replacing the wrong page.
+        */
+       VM_BUG_ON(freeze && !page);
+       if (page && page != pmd_page(*pmd))
+               goto out;
+
        if (pmd_trans_huge(*pmd)) {
-               struct page *page = pmd_page(*pmd);
+               page = pmd_page(*pmd);
                if (PageMlocked(page))
                        clear_page_mlock(page);
        } else if (!pmd_devmap(*pmd))
@@ -3025,24 +3029,8 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                return;
 
        pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)))
-               return;
 
-       /*
-        * If caller asks to setup a migration entries, we need a page to check
-        * pmd against. Otherwise we can end up replacing wrong page.
-        */
-       VM_BUG_ON(freeze && !page);
-       if (page && page != pmd_page(*pmd))
-               return;
-
-       /*
-        * Caller holds the mmap_sem write mode or the anon_vma lock,
-        * so a huge pmd cannot materialize from under us (khugepaged
-        * holds both the mmap_sem write mode and the anon_vma lock
-        * write mode).
-        */
-       __split_huge_pmd(vma, pmd, address, freeze);
+       __split_huge_pmd(vma, pmd, address, freeze, page);
 }
 
 void vma_adjust_trans_huge(struct vm_area_struct *vma,
index c1f3c0be150a94f09588672ce44ade9047d78958..addfe4accc076817cdbc4009bb9f44d5e61315d4 100644 (file)
@@ -3383,7 +3383,7 @@ retry_avoidcopy:
        /* If no-one else is actually using this page, avoid the copy
         * and just make the page writable */
        if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
-               page_move_anon_rmap(old_page, vma, address);
+               page_move_anon_rmap(old_page, vma);
                set_huge_ptep_writable(vma, address, ptep);
                return 0;
        }
index 4973505a9bdde8fda4291debdf21bdadfaa76c3b..65793f150d1f6e1f77d32ad64d37966f0ad9704c 100644 (file)
@@ -238,30 +238,23 @@ static void qlist_move_cache(struct qlist_head *from,
                                   struct qlist_head *to,
                                   struct kmem_cache *cache)
 {
-       struct qlist_node *prev = NULL, *curr;
+       struct qlist_node *curr;
 
        if (unlikely(qlist_empty(from)))
                return;
 
        curr = from->head;
+       qlist_init(from);
        while (curr) {
-               struct qlist_node *qlink = curr;
-               struct kmem_cache *obj_cache = qlink_to_cache(qlink);
-
-               if (obj_cache == cache) {
-                       if (unlikely(from->head == qlink)) {
-                               from->head = curr->next;
-                               prev = curr;
-                       } else
-                               prev->next = curr->next;
-                       if (unlikely(from->tail == qlink))
-                               from->tail = curr->next;
-                       from->bytes -= cache->size;
-                       qlist_put(to, qlink, cache->size);
-               } else {
-                       prev = curr;
-               }
-               curr = curr->next;
+               struct qlist_node *next = curr->next;
+               struct kmem_cache *obj_cache = qlink_to_cache(curr);
+
+               if (obj_cache == cache)
+                       qlist_put(to, curr, obj_cache->size);
+               else
+                       qlist_put(from, curr, obj_cache->size);
+
+               curr = next;
        }
 }
 
index ac8664db38232f5ede345ee4b2f2f9ec0c5ac79d..5339c89dff6317510b2710e9ab2770c23ac71e1e 100644 (file)
@@ -4057,6 +4057,60 @@ static struct cftype mem_cgroup_legacy_files[] = {
        { },    /* terminate */
 };
 
+/*
+ * Private memory cgroup IDR
+ *
+ * Swap-out records and page cache shadow entries need to store memcg
+ * references in constrained space, so we maintain an ID space that is
+ * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
+ * memory-controlled cgroups to 64k.
+ *
+ * However, there are usually many references to the offline CSS after
+ * the cgroup has been destroyed, such as page cache or reclaimable
+ * slab objects, that don't need to hang on to the ID. We want to keep
+ * those dead CSSes from occupying IDs, or we might quickly exhaust the
+ * relatively small ID space and prevent the creation of new cgroups
+ * even when there are far fewer than 64k cgroups - possibly none.
+ *
+ * Maintain a private 16-bit ID space for memcg, and allow the ID to
+ * be freed and recycled when it's no longer needed, which is usually
+ * when the CSS is offlined.
+ *
+ * The only exceptions to that are records of swapped-out tmpfs/shmem
+ * pages that need to be attributed to live ancestors on swapin. But
+ * those references are manageable from userspace.
+ */
+
+static DEFINE_IDR(mem_cgroup_idr);
+
+static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+       atomic_inc(&memcg->id.ref);
+}
+
+static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+       if (atomic_dec_and_test(&memcg->id.ref)) {
+               idr_remove(&mem_cgroup_idr, memcg->id.id);
+               memcg->id.id = 0;
+
+               /* Memcg ID pins CSS */
+               css_put(&memcg->css);
+       }
+}
+
+/**
+ * mem_cgroup_from_id - look up a memcg from a memcg id
+ * @id: the memcg id to look up
+ *
+ * Caller must hold rcu_read_lock().
+ */
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+       WARN_ON_ONCE(!rcu_read_lock_held());
+       return idr_find(&mem_cgroup_idr, id);
+}
+
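A lookup sketch modeled on how a swapin-style caller would use this (hedged: the surrounding code is hypothetical, but css_tryget_online() is the standard way to pin a css that may be on its way out):

        struct mem_cgroup *memcg;

        rcu_read_lock();
        memcg = mem_cgroup_from_id(id);
        /* The ID may have been recycled; only trust a still-online css. */
        if (memcg && !css_tryget_online(&memcg->css))
                memcg = NULL;
        rcu_read_unlock();
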
 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 {
        struct mem_cgroup_per_node *pn;
@@ -4116,6 +4170,12 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        if (!memcg)
                return NULL;
 
+       memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
+                                1, MEM_CGROUP_ID_MAX,
+                                GFP_KERNEL);
+       if (memcg->id.id < 0)
+               goto fail;
+
        memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
        if (!memcg->stat)
                goto fail;
@@ -4142,8 +4202,11 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 #ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&memcg->cgwb_list);
 #endif
+       idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
        return memcg;
 fail:
+       if (memcg->id.id > 0)
+               idr_remove(&mem_cgroup_idr, memcg->id.id);
        mem_cgroup_free(memcg);
        return NULL;
 }
@@ -4206,12 +4269,11 @@ fail:
        return ERR_PTR(-ENOMEM);
 }
 
-static int
-mem_cgroup_css_online(struct cgroup_subsys_state *css)
+static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
 {
-       if (css->id > MEM_CGROUP_ID_MAX)
-               return -ENOSPC;
-
+       /* Online state pins memcg ID, memcg ID pins CSS */
+       mem_cgroup_id_get(mem_cgroup_from_css(css));
+       css_get(css);
        return 0;
 }
 
@@ -4234,6 +4296,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 
        memcg_offline_kmem(memcg);
        wb_memcg_offline(memcg);
+
+       mem_cgroup_id_put(memcg);
 }
 
 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
@@ -5756,6 +5820,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        if (!memcg)
                return;
 
+       mem_cgroup_id_get(memcg);
        oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
        VM_BUG_ON_PAGE(oldid, page);
        mem_cgroup_swap_statistics(memcg, true);
@@ -5774,6 +5839,9 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        VM_BUG_ON(!irqs_disabled());
        mem_cgroup_charge_statistics(memcg, page, false, -1);
        memcg_check_events(memcg, page);
+
+       if (!mem_cgroup_is_root(memcg))
+               css_put(&memcg->css);
 }
 
 /*
@@ -5804,11 +5872,11 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
            !page_counter_try_charge(&memcg->swap, 1, &counter))
                return -ENOMEM;
 
+       mem_cgroup_id_get(memcg);
        oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
        VM_BUG_ON_PAGE(oldid, page);
        mem_cgroup_swap_statistics(memcg, true);
 
-       css_get(&memcg->css);
        return 0;
 }
 
@@ -5837,7 +5905,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
                                page_counter_uncharge(&memcg->memsw, 1);
                }
                mem_cgroup_swap_statistics(memcg, false);
-               css_put(&memcg->css);
+               mem_cgroup_id_put(memcg);
        }
        rcu_read_unlock();
 }
index cd1f29e4897e3e5207071ed9cccabc55f86e6a87..9e046819e619487ed5a8e3cc65f4131812168692 100644 (file)
@@ -2399,8 +2399,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                 * Protected against the rmap code by
                                 * the page lock.
                                 */
-                               page_move_anon_rmap(compound_head(old_page),
-                                                   vma, address);
+                               page_move_anon_rmap(old_page, vma);
                        }
                        unlock_page(old_page);
                        return wp_page_reuse(mm, vma, address, page_table, ptl,
index 6903b695ebaef81ef890f4b5e41c749d89dce2e1..8b3e1341b7544608cac4777a37bbd424432488e1 100644 (file)
@@ -286,7 +286,9 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
 /* Returns true if the struct page for the pfn is uninitialised */
 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
 {
-       if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
+       int nid = early_pfn_to_nid(pfn);
+
+       if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
                return true;
 
        return false;
@@ -1273,7 +1275,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
        spin_lock(&early_pfn_lock);
        nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
        if (nid < 0)
-               nid = 0;
+               nid = first_online_node;
        spin_unlock(&early_pfn_lock);
 
        return nid;
index 0ea5d9071b32b967d012f36e600a2ee75acd8f3d..701b93fea2a0677cd8a58e646360747bf5626c5a 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1084,23 +1084,20 @@ EXPORT_SYMBOL_GPL(page_mkclean);
  * page_move_anon_rmap - move a page to our anon_vma
  * @page:      the page to move to our anon_vma
  * @vma:       the vma the page belongs to
- * @address:   the user virtual address mapped
  *
  * When a page belongs exclusively to one process after a COW event,
  * that page can be moved into the anon_vma that belongs to just that
  * process, so the rmap code will not search the parent or sibling
  * processes.
  */
-void page_move_anon_rmap(struct page *page,
-       struct vm_area_struct *vma, unsigned long address)
+void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
 
+       page = compound_head(page);
+
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_VMA(!anon_vma, vma);
-       if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
-               address &= HPAGE_PMD_MASK;
-       VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        /*
@@ -1427,7 +1424,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        goto out;
        }
 
-       pte = page_check_address(page, mm, address, &ptl, 0);
+       pte = page_check_address(page, mm, address, &ptl,
+                                PageTransCompound(page));
        if (!pte)
                goto out;
 
index 24463b67b6efa5817e7c1e806d1bf6337d300ba2..171dee7a131f6959fe444405f280374be7eb25d0 100644 (file)
@@ -2225,9 +2225,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                        error = shmem_getpage(inode, index, &page, SGP_FALLOC);
                if (error) {
                        /* Remove the !PageUptodate pages we added */
-                       shmem_undo_range(inode,
-                               (loff_t)start << PAGE_SHIFT,
-                               ((loff_t)index << PAGE_SHIFT) - 1, true);
+                       if (index > start) {
+                               shmem_undo_range(inode,
+                                   (loff_t)start << PAGE_SHIFT,
+                                   ((loff_t)index << PAGE_SHIFT) - 1, true);
+                       }
                        goto undone;
                }
 
index a65dad7fdcd12495a51eabd91fc76ed96edb0576..82317abb03edc7aa2c89e0a20032fc57a532a723 100644 (file)
@@ -526,8 +526,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
                goto out_unlock;
 
        cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
-       cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
-                              css->id, memcg_name_buf);
+       cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
+                              css->serial_nr, memcg_name_buf);
        if (!cache_name)
                goto out_unlock;
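
The switch from css->id to css->serial_nr is about uniqueness: ids can be recycled once a cgroup is destroyed, while the serial number only grows, so two caches can never end up with the same name. A hedged userspace sketch of that naming scheme, with the counter below standing in for the cgroup core's allocator and all names invented:

#include <stdio.h>

static unsigned long long next_serial = 1;

static void make_cache_name(char *buf, size_t len, const char *root,
			    const char *memcg)
{
	/* monotonically increasing, never reused, unlike an id */
	unsigned long long serial = next_serial++;

	snprintf(buf, len, "%s(%llu:%s)", root, serial, memcg);
}

int main(void)
{
	char name[64];

	make_cache_name(name, sizeof(name), "kmalloc-64", "mygroup");
	puts(name);   /* kmalloc-64(1:mygroup) */
	return 0;
}
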
 
index 8a75f8d2916af99a1efb6101666038dcd4309c04..577277546d985db41aee898a3c69b1b0488d289b 100644 (file)
@@ -491,7 +491,7 @@ static int __init workingset_init(void)
        max_order = fls_long(totalram_pages - 1);
        if (max_order > timestamp_bits)
                bucket_order = max_order - timestamp_bits;
-       printk("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+       pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
               timestamp_bits, max_order, bucket_order);
 
        ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
index 86ae75b77390964ee42056c0dd1763650d9fb9ff..516b0e73263c7cac1db1546d05eb6a1d6f642ab5 100644 (file)
@@ -146,10 +146,12 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 
 static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
 {
-       /* TODO: gotta make sure the underlying layer can handle it,
-        * maybe an IFF_VLAN_CAPABLE flag for devices?
-        */
-       if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+       unsigned int max_mtu = real_dev->mtu;
+
+       if (netif_reduces_vlan_mtu(real_dev))
+               max_mtu -= VLAN_HLEN;
+       if (max_mtu < new_mtu)
                return -ERANGE;
 
        dev->mtu = new_mtu;
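
Under the new helper, a device that cannot accommodate the 4-byte 802.1Q tag in its own MTU budget exposes a correspondingly smaller ceiling to the VLAN device. A tiny standalone sketch of the same arithmetic; everything except the VLAN_HLEN constant is invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define VLAN_HLEN 4

static unsigned int vlan_max_mtu(unsigned int real_mtu,
				 bool reduces_vlan_mtu)
{
	/* devices that cannot carry the 4-byte tag in-line lose it
	 * from the payload budget, exactly as in the hunk above
	 */
	return reduces_vlan_mtu ? real_mtu - VLAN_HLEN : real_mtu;
}

int main(void)
{
	printf("%u\n", vlan_max_mtu(1500, true));  /* 1496 */
	printf("%u\n", vlan_max_mtu(1500, false)); /* 1500 */
	return 0;
}
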
index c92b52f37d38de143022f172881dd03f076b0194..1270207f3d7c9dd6fde0e1f7839acfe18a4d82c9 100644 (file)
@@ -118,6 +118,7 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
 {
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev;
+       unsigned int max_mtu;
        __be16 proto;
        int err;
 
@@ -144,9 +145,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
        if (err < 0)
                return err;
 
+       max_mtu = netif_reduces_vlan_mtu(real_dev) ? real_dev->mtu - VLAN_HLEN :
+                                                    real_dev->mtu;
        if (!tb[IFLA_MTU])
-               dev->mtu = real_dev->mtu;
-       else if (dev->mtu > real_dev->mtu)
+               dev->mtu = max_mtu;
+       else if (dev->mtu > max_mtu)
                return -EINVAL;
 
        err = vlan_changelink(dev, tb, data);
index fbd0acf80b13236bd8c768bc8bf5d69d6a7e7125..2fdebabbfacd14690abaca3c57dfcddf944466a1 100644 (file)
@@ -976,7 +976,8 @@ static int ax25_release(struct socket *sock)
                        release_sock(sk);
                        ax25_disconnect(ax25, 0);
                        lock_sock(sk);
-                       ax25_destroy_socket(ax25);
+                       if (!sock_flag(ax25->sk, SOCK_DESTROY))
+                               ax25_destroy_socket(ax25);
                        break;
 
                case AX25_STATE_3:
index 951cd57bb07df3930410a998b12c468ac413d62b..5237dff6941d81e3b2cd98c47013746c58798e77 100644 (file)
@@ -102,6 +102,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
        switch (ax25->state) {
 
        case AX25_STATE_0:
+       case AX25_STATE_2:
                /* Magic here: If we listen() and a new link dies before it
                   is accepted() it isn't 'dead' so doesn't get removed. */
                if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@@ -111,6 +112,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
                                sock_hold(sk);
                                ax25_destroy_socket(ax25);
                                bh_unlock_sock(sk);
+                               /* Ungrab socket and destroy it */
                                sock_put(sk);
                        } else
                                ax25_destroy_socket(ax25);
@@ -213,7 +215,8 @@ void ax25_ds_t1_timeout(ax25_cb *ax25)
        case AX25_STATE_2:
                if (ax25->n2count == ax25->n2) {
                        ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
-                       ax25_disconnect(ax25, ETIMEDOUT);
+                       if (!sock_flag(ax25->sk, SOCK_DESTROY))
+                               ax25_disconnect(ax25, ETIMEDOUT);
                        return;
                } else {
                        ax25->n2count++;
index 004467c9e6e15ada9c6eae056cabff7dd8d5777e..2c0d6ef66f9d581d31b88ecf0875f78b90728a36 100644 (file)
@@ -38,6 +38,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
 
        switch (ax25->state) {
        case AX25_STATE_0:
+       case AX25_STATE_2:
                /* Magic here: If we listen() and a new link dies before it
                   is accepted() it isn't 'dead' so doesn't get removed. */
                if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@@ -47,6 +48,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
                                sock_hold(sk);
                                ax25_destroy_socket(ax25);
                                bh_unlock_sock(sk);
+                               /* Ungrab socket and destroy it */
                                sock_put(sk);
                        } else
                                ax25_destroy_socket(ax25);
@@ -144,7 +146,8 @@ void ax25_std_t1timer_expiry(ax25_cb *ax25)
        case AX25_STATE_2:
                if (ax25->n2count == ax25->n2) {
                        ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
-                       ax25_disconnect(ax25, ETIMEDOUT);
+                       if (!sock_flag(ax25->sk, SOCK_DESTROY))
+                               ax25_disconnect(ax25, ETIMEDOUT);
                        return;
                } else {
                        ax25->n2count++;
index 3b78e8473a01b4a82e376266b04078e714ce1e26..655a7d4c96e1e26b7390b8c783cebb2a31ca4ca8 100644 (file)
@@ -264,7 +264,8 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
 {
        ax25_clear_queues(ax25);
 
-       ax25_stop_heartbeat(ax25);
+       if (!sock_flag(ax25->sk, SOCK_DESTROY))
+               ax25_stop_heartbeat(ax25);
        ax25_stop_t1timer(ax25);
        ax25_stop_t2timer(ax25);
        ax25_stop_t3timer(ax25);
index 748a9ead7ce50fd65a0a09f2b19eaa035a566327..825a5cdf4382d08ff3b90c7499fd605f940b150b 100644 (file)
@@ -177,10 +177,21 @@ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
 static void batadv_claim_release(struct kref *ref)
 {
        struct batadv_bla_claim *claim;
+       struct batadv_bla_backbone_gw *old_backbone_gw;
 
        claim = container_of(ref, struct batadv_bla_claim, refcount);
 
-       batadv_backbone_gw_put(claim->backbone_gw);
+       spin_lock_bh(&claim->backbone_lock);
+       old_backbone_gw = claim->backbone_gw;
+       claim->backbone_gw = NULL;
+       spin_unlock_bh(&claim->backbone_lock);
+
+       spin_lock_bh(&old_backbone_gw->crc_lock);
+       old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+       spin_unlock_bh(&old_backbone_gw->crc_lock);
+
+       batadv_backbone_gw_put(old_backbone_gw);
+
        kfree_rcu(claim, rcu);
 }
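
The release path now detaches backbone_gw under the new backbone_lock before fixing up the old gateway's CRC and dropping the reference, so no reader can pick up a pointer whose refcount is about to hit zero. A hedged userspace sketch of that detach-then-put pattern, with a pthread mutex and a plain counter standing in for spinlock_t and kref; all names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct gw {
	int refs;
};

struct claim {
	pthread_mutex_t lock;
	struct gw *backbone_gw;
};

/* stand-in for batadv_backbone_gw_put(): drop one reference */
static void gw_put(struct gw *gw)
{
	if (--gw->refs == 0) {
		free(gw);
		puts("gateway released");
	}
}

static void claim_release(struct claim *c)
{
	struct gw *old;

	/* detach under the lock so no reader can still fetch a pointer
	 * whose last reference is about to be dropped
	 */
	pthread_mutex_lock(&c->lock);
	old = c->backbone_gw;
	c->backbone_gw = NULL;
	pthread_mutex_unlock(&c->lock);

	/* CRC bookkeeping on 'old' would go here, as in the hunk */
	gw_put(old);
}

int main(void)
{
	struct claim c = { PTHREAD_MUTEX_INITIALIZER, NULL };

	c.backbone_gw = malloc(sizeof(*c.backbone_gw));
	c.backbone_gw->refs = 1;
	claim_release(&c);
	return 0;
}
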
 
@@ -418,9 +429,12 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
                break;
        }
 
-       if (vid & BATADV_VLAN_HAS_TAG)
+       if (vid & BATADV_VLAN_HAS_TAG) {
                skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
                                      vid & VLAN_VID_MASK);
+               if (!skb)
+                       goto out;
+       }
 
        skb_reset_mac_header(skb);
        skb->protocol = eth_type_trans(skb, soft_iface);
@@ -674,8 +688,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                                 const u8 *mac, const unsigned short vid,
                                 struct batadv_bla_backbone_gw *backbone_gw)
 {
+       struct batadv_bla_backbone_gw *old_backbone_gw;
        struct batadv_bla_claim *claim;
        struct batadv_bla_claim search_claim;
+       bool remove_crc = false;
        int hash_added;
 
        ether_addr_copy(search_claim.addr, mac);
@@ -689,8 +705,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                        return;
 
                ether_addr_copy(claim->addr, mac);
+               spin_lock_init(&claim->backbone_lock);
                claim->vid = vid;
                claim->lasttime = jiffies;
+               kref_get(&backbone_gw->refcount);
                claim->backbone_gw = backbone_gw;
 
                kref_init(&claim->refcount);
@@ -718,15 +736,26 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                           "bla_add_claim(): changing ownership for %pM, vid %d\n",
                           mac, BATADV_PRINT_VID(vid));
 
-               spin_lock_bh(&claim->backbone_gw->crc_lock);
-               claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
-               spin_unlock_bh(&claim->backbone_gw->crc_lock);
-               batadv_backbone_gw_put(claim->backbone_gw);
+               remove_crc = true;
        }
-       /* set (new) backbone gw */
+
+       /* replace backbone_gw atomically and adjust reference counters */
+       spin_lock_bh(&claim->backbone_lock);
+       old_backbone_gw = claim->backbone_gw;
        kref_get(&backbone_gw->refcount);
        claim->backbone_gw = backbone_gw;
+       spin_unlock_bh(&claim->backbone_lock);
 
+       if (remove_crc) {
+               /* remove claim address from old backbone_gw */
+               spin_lock_bh(&old_backbone_gw->crc_lock);
+               old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+               spin_unlock_bh(&old_backbone_gw->crc_lock);
+       }
+
+       batadv_backbone_gw_put(old_backbone_gw);
+
+       /* add claim address to new backbone_gw */
        spin_lock_bh(&backbone_gw->crc_lock);
        backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
        spin_unlock_bh(&backbone_gw->crc_lock);
@@ -736,6 +765,26 @@ claim_free_ref:
        batadv_claim_put(claim);
 }
 
+/**
+ * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
+ *  claim
+ * @claim: claim whose backbone_gw should be returned
+ *
+ * Return: valid reference to claim::backbone_gw
+ */
+static struct batadv_bla_backbone_gw *
+batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
+{
+       struct batadv_bla_backbone_gw *backbone_gw;
+
+       spin_lock_bh(&claim->backbone_lock);
+       backbone_gw = claim->backbone_gw;
+       kref_get(&backbone_gw->refcount);
+       spin_unlock_bh(&claim->backbone_lock);
+
+       return backbone_gw;
+}
+
 /**
  * batadv_bla_del_claim - delete a claim from the claim hash
  * @bat_priv: the bat priv with all the soft interface information
@@ -760,10 +809,6 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
                           batadv_choose_claim, claim);
        batadv_claim_put(claim); /* reference from the hash is gone */
 
-       spin_lock_bh(&claim->backbone_gw->crc_lock);
-       claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
-       spin_unlock_bh(&claim->backbone_gw->crc_lock);
-
        /* don't need the reference from hash_find() anymore */
        batadv_claim_put(claim);
 }
@@ -1216,6 +1261,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
                                    int now)
 {
+       struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_bla_claim *claim;
        struct hlist_head *head;
        struct batadv_hashtable *hash;
@@ -1230,14 +1276,17 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(claim, head, hash_entry) {
+                       backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
                        if (now)
                                goto purge_now;
-                       if (!batadv_compare_eth(claim->backbone_gw->orig,
+
+                       if (!batadv_compare_eth(backbone_gw->orig,
                                                primary_if->net_dev->dev_addr))
-                               continue;
+                               goto skip;
+
                        if (!batadv_has_timed_out(claim->lasttime,
                                                  BATADV_BLA_CLAIM_TIMEOUT))
-                               continue;
+                               goto skip;
 
                        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                                   "bla_purge_claims(): %pM, vid %d, time out\n",
@@ -1245,8 +1294,10 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
 
 purge_now:
                        batadv_handle_unclaim(bat_priv, primary_if,
-                                             claim->backbone_gw->orig,
+                                             backbone_gw->orig,
                                              claim->addr, claim->vid);
+skip:
+                       batadv_backbone_gw_put(backbone_gw);
                }
                rcu_read_unlock();
        }
@@ -1757,9 +1808,11 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
 bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                   unsigned short vid, bool is_bcast)
 {
+       struct batadv_bla_backbone_gw *backbone_gw;
        struct ethhdr *ethhdr;
        struct batadv_bla_claim search_claim, *claim = NULL;
        struct batadv_hard_iface *primary_if;
+       bool own_claim;
        bool ret;
 
        ethhdr = eth_hdr(skb);
@@ -1794,8 +1847,12 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
        }
 
        /* if it is our own claim ... */
-       if (batadv_compare_eth(claim->backbone_gw->orig,
-                              primary_if->net_dev->dev_addr)) {
+       backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+       own_claim = batadv_compare_eth(backbone_gw->orig,
+                                      primary_if->net_dev->dev_addr);
+       batadv_backbone_gw_put(backbone_gw);
+
+       if (own_claim) {
                /* ... allow it in any case */
                claim->lasttime = jiffies;
                goto allow;
@@ -1859,7 +1916,9 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
 {
        struct ethhdr *ethhdr;
        struct batadv_bla_claim search_claim, *claim = NULL;
+       struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_hard_iface *primary_if;
+       bool client_roamed;
        bool ret = false;
 
        primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -1889,8 +1948,12 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                goto allow;
 
        /* check if we are responsible. */
-       if (batadv_compare_eth(claim->backbone_gw->orig,
-                              primary_if->net_dev->dev_addr)) {
+       backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+       client_roamed = batadv_compare_eth(backbone_gw->orig,
+                                          primary_if->net_dev->dev_addr);
+       batadv_backbone_gw_put(backbone_gw);
+
+       if (client_roamed) {
                /* if yes, the client has roamed and we have
                 * to unclaim it.
                 */
@@ -1938,6 +2001,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
+       struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_bla_claim *claim;
        struct batadv_hard_iface *primary_if;
        struct hlist_head *head;
@@ -1962,17 +2026,21 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(claim, head, hash_entry) {
-                       is_own = batadv_compare_eth(claim->backbone_gw->orig,
+                       backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+
+                       is_own = batadv_compare_eth(backbone_gw->orig,
                                                    primary_addr);
 
-                       spin_lock_bh(&claim->backbone_gw->crc_lock);
-                       backbone_crc = claim->backbone_gw->crc;
-                       spin_unlock_bh(&claim->backbone_gw->crc_lock);
+                       spin_lock_bh(&backbone_gw->crc_lock);
+                       backbone_crc = backbone_gw->crc;
+                       spin_unlock_bh(&backbone_gw->crc_lock);
                        seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
                                   claim->addr, BATADV_PRINT_VID(claim->vid),
-                                  claim->backbone_gw->orig,
+                                  backbone_gw->orig,
                                   (is_own ? 'x' : ' '),
                                   backbone_crc);
+
+                       batadv_backbone_gw_put(backbone_gw);
                }
                rcu_read_unlock();
        }
index 278800a99c69458c083da635bd39bcd19e788842..aee3b39914717390ed4fe2899a781b6e7625451f 100644 (file)
@@ -1009,9 +1009,12 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                if (!skb_new)
                        goto out;
 
-               if (vid & BATADV_VLAN_HAS_TAG)
+               if (vid & BATADV_VLAN_HAS_TAG) {
                        skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
                                                  vid & VLAN_VID_MASK);
+                       if (!skb_new)
+                               goto out;
+               }
 
                skb_reset_mac_header(skb_new);
                skb_new->protocol = eth_type_trans(skb_new,
@@ -1089,9 +1092,12 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
         */
        skb_reset_mac_header(skb_new);
 
-       if (vid & BATADV_VLAN_HAS_TAG)
+       if (vid & BATADV_VLAN_HAS_TAG) {
                skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
                                          vid & VLAN_VID_MASK);
+               if (!skb_new)
+                       goto out;
+       }
 
        /* To preserve backwards compatibility, the node has chosen the outgoing
         * format based on the incoming request packet type. The assumption is
index 7f51bc2c06eb7699492c41f2fe46fe85b26147a2..ab8c4f9738fe2206f878ed9f0dfda8803455ec5b 100644 (file)
@@ -765,6 +765,8 @@ static void batadv_orig_node_release(struct kref *ref)
        struct batadv_neigh_node *neigh_node;
        struct batadv_orig_node *orig_node;
        struct batadv_orig_ifinfo *orig_ifinfo;
+       struct batadv_orig_node_vlan *vlan;
+       struct batadv_orig_ifinfo *last_candidate;
 
        orig_node = container_of(ref, struct batadv_orig_node, refcount);
 
@@ -782,8 +784,21 @@ static void batadv_orig_node_release(struct kref *ref)
                hlist_del_rcu(&orig_ifinfo->list);
                batadv_orig_ifinfo_put(orig_ifinfo);
        }
+
+       last_candidate = orig_node->last_bonding_candidate;
+       orig_node->last_bonding_candidate = NULL;
        spin_unlock_bh(&orig_node->neigh_list_lock);
 
+       if (last_candidate)
+               batadv_orig_ifinfo_put(last_candidate);
+
+       spin_lock_bh(&orig_node->vlan_list_lock);
+       hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
+               hlist_del_rcu(&vlan->list);
+               batadv_orig_node_vlan_put(vlan);
+       }
+       spin_unlock_bh(&orig_node->vlan_list_lock);
+
        /* Free nc_nodes */
        batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
 
index e3857ed4057f55d2f90ff72ed7cb8a14532d6fe2..bfac086b4d015053f88dee70e63bebd1ae9b3c56 100644 (file)
@@ -374,6 +374,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
                if (skb_cow(skb, ETH_HLEN) < 0)
                        goto out;
 
+               ethhdr = eth_hdr(skb);
                icmph = (struct batadv_icmp_header *)skb->data;
                icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
                if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
@@ -454,6 +455,29 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
        return 0;
 }
 
+/**
+ * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
+ * @orig_node: originator node whose bonding candidates should be replaced
+ * @new_candidate: new bonding candidate or NULL
+ */
+static void
+batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
+                           struct batadv_orig_ifinfo *new_candidate)
+{
+       struct batadv_orig_ifinfo *old_candidate;
+
+       spin_lock_bh(&orig_node->neigh_list_lock);
+       old_candidate = orig_node->last_bonding_candidate;
+
+       if (new_candidate)
+               kref_get(&new_candidate->refcount);
+       orig_node->last_bonding_candidate = new_candidate;
+       spin_unlock_bh(&orig_node->neigh_list_lock);
+
+       if (old_candidate)
+               batadv_orig_ifinfo_put(old_candidate);
+}
+
 /**
  * batadv_find_router - find a suitable router for this originator
  * @bat_priv: the bat priv with all the soft interface information
@@ -561,10 +585,6 @@ next:
        }
        rcu_read_unlock();
 
-       /* last_bonding_candidate is reset below, remove the old reference. */
-       if (orig_node->last_bonding_candidate)
-               batadv_orig_ifinfo_put(orig_node->last_bonding_candidate);
-
        /* After finding candidates, handle the three cases:
         * 1) there is a next candidate, use that
         * 2) there is no next candidate, use the first of the list
@@ -573,21 +593,28 @@ next:
        if (next_candidate) {
                batadv_neigh_node_put(router);
 
-               /* remove references to first candidate, we don't need it. */
-               if (first_candidate) {
-                       batadv_neigh_node_put(first_candidate_router);
-                       batadv_orig_ifinfo_put(first_candidate);
-               }
+               kref_get(&next_candidate_router->refcount);
                router = next_candidate_router;
-               orig_node->last_bonding_candidate = next_candidate;
+               batadv_last_bonding_replace(orig_node, next_candidate);
        } else if (first_candidate) {
                batadv_neigh_node_put(router);
 
-               /* refcounting has already been done in the loop above. */
+               kref_get(&first_candidate_router->refcount);
                router = first_candidate_router;
-               orig_node->last_bonding_candidate = first_candidate;
+               batadv_last_bonding_replace(orig_node, first_candidate);
        } else {
-               orig_node->last_bonding_candidate = NULL;
+               batadv_last_bonding_replace(orig_node, NULL);
+       }
+
+       /* cleanup of candidates */
+       if (first_candidate) {
+               batadv_neigh_node_put(first_candidate_router);
+               batadv_orig_ifinfo_put(first_candidate);
+       }
+
+       if (next_candidate) {
+               batadv_neigh_node_put(next_candidate_router);
+               batadv_orig_ifinfo_put(next_candidate);
        }
 
        return router;
index f2f125684ed9c199e1c4f7f7b3b6109b3b9a51c2..010397650fa5b7c2c4e4912dc680440dbaf7e21d 100644 (file)
@@ -424,8 +424,8 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
        struct batadv_orig_node *orig_node;
 
        orig_node = batadv_gw_get_selected_orig(bat_priv);
-       return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
-                                      orig_node, vid);
+       return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
+                                      BATADV_P_DATA, orig_node, vid);
 }
 
 void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
index 343d2c90439928cfd986654de265eeb10ca9a10a..287a3879ed7e38dfd0f0d3f79e2cb2580ea84276 100644 (file)
@@ -1033,7 +1033,9 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
 static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
                                          struct list_head *head)
 {
+       struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct batadv_hard_iface *hard_iface;
+       struct batadv_softif_vlan *vlan;
 
        list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->soft_iface == soft_iface)
@@ -1041,6 +1043,13 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
                                                        BATADV_IF_CLEANUP_KEEP);
        }
 
+       /* destroy the "untagged" VLAN */
+       vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+       if (vlan) {
+               batadv_softif_destroy_vlan(bat_priv, vlan);
+               batadv_softif_vlan_put(vlan);
+       }
+
        batadv_sysfs_del_meshif(soft_iface);
        unregister_netdevice_queue(soft_iface, head);
 }
index feaf492b01ca011b221fbfb74bba295da794ac15..57ec87f37050d98faefc37e989024a572d26b421 100644 (file)
@@ -650,8 +650,10 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
 
        /* increase the refcounter of the related vlan */
        vlan = batadv_softif_vlan_get(bat_priv, vid);
-       if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
-                addr, BATADV_PRINT_VID(vid))) {
+       if (!vlan) {
+               net_ratelimited_function(batadv_info, soft_iface,
+                                        "adding TT local entry %pM to non-existent VLAN %d\n",
+                                        addr, BATADV_PRINT_VID(vid));
                kfree(tt_local);
                tt_local = NULL;
                goto out;
@@ -691,7 +693,6 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
        if (unlikely(hash_added != 0)) {
                /* remove the reference for the hash */
                batadv_tt_local_entry_put(tt_local);
-               batadv_softif_vlan_put(vlan);
                goto out;
        }
 
@@ -2269,6 +2270,29 @@ static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
        return crc;
 }
 
+/**
+ * batadv_tt_req_node_release - free tt_req_node entry
+ * @ref: kref pointer of the tt_req_node entry
+ */
+static void batadv_tt_req_node_release(struct kref *ref)
+{
+       struct batadv_tt_req_node *tt_req_node;
+
+       tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount);
+
+       kfree(tt_req_node);
+}
+
+/**
+ * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
+ *  possibly release it
+ * @tt_req_node: tt_req_node to be free'd
+ */
+static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node)
+{
+       kref_put(&tt_req_node->refcount, batadv_tt_req_node_release);
+}
+
 static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_req_node *node;
@@ -2278,7 +2302,7 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 
        hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                hlist_del_init(&node->list);
-               kfree(node);
+               batadv_tt_req_node_put(node);
        }
 
        spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2315,7 +2339,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
                if (batadv_has_timed_out(node->issued_at,
                                         BATADV_TT_REQUEST_TIMEOUT)) {
                        hlist_del_init(&node->list);
-                       kfree(node);
+                       batadv_tt_req_node_put(node);
                }
        }
        spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2347,9 +2371,11 @@ batadv_tt_req_node_new(struct batadv_priv *bat_priv,
        if (!tt_req_node)
                goto unlock;
 
+       kref_init(&tt_req_node->refcount);
        ether_addr_copy(tt_req_node->addr, orig_node->orig);
        tt_req_node->issued_at = jiffies;
 
+       kref_get(&tt_req_node->refcount);
        hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
 unlock:
        spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2613,13 +2639,19 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
 out:
        if (primary_if)
                batadv_hardif_put(primary_if);
+
        if (ret && tt_req_node) {
                spin_lock_bh(&bat_priv->tt.req_list_lock);
-               /* hlist_del_init() verifies tt_req_node still is in the list */
-               hlist_del_init(&tt_req_node->list);
+               if (!hlist_unhashed(&tt_req_node->list)) {
+                       hlist_del_init(&tt_req_node->list);
+                       batadv_tt_req_node_put(tt_req_node);
+               }
                spin_unlock_bh(&bat_priv->tt.req_list_lock);
-               kfree(tt_req_node);
        }
+
+       if (tt_req_node)
+               batadv_tt_req_node_put(tt_req_node);
+
        kfree(tvlv_tt_data);
        return ret;
 }
@@ -3055,7 +3087,7 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
                if (!batadv_compare_eth(node->addr, resp_src))
                        continue;
                hlist_del_init(&node->list);
-               kfree(node);
+               batadv_tt_req_node_put(node);
        }
 
        spin_unlock_bh(&bat_priv->tt.req_list_lock);
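
The tt_req_node conversion gives each node two references: kref_init() hands one to the caller of batadv_tt_req_node_new(), and the explicit kref_get() covers list membership, so the list-removal paths and the caller each drop exactly one. A minimal sketch of that scheme, with a plain counter standing in for struct kref and all names invented:

#include <stdio.h>
#include <stdlib.h>

struct req_node {
	int refs;       /* stand-in for struct kref */
};

static struct req_node *req_node_new(void)
{
	struct req_node *n = calloc(1, sizeof(*n));

	if (!n)
		return NULL;
	n->refs = 1;    /* kref_init(): the caller's reference    */
	n->refs++;      /* kref_get(): the list takes its own ref */
	/* hlist_add_head(...) would happen here */
	return n;
}

static void req_node_put(struct req_node *n)
{
	if (--n->refs == 0) {
		free(n);
		puts("node freed");
	}
}

int main(void)
{
	struct req_node *n = req_node_new();

	req_node_put(n);   /* list removal path drops its reference */
	req_node_put(n);   /* caller drops the last one: freed here */
	return 0;
}
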
index 6a577f4f8ba77ce1b2b923c55deada8a050802e8..74d865a4df464f03a54f1d7f9f87ad5a095ad088 100644 (file)
@@ -330,7 +330,9 @@ struct batadv_orig_node {
        DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
        u32 last_bcast_seqno;
        struct hlist_head neigh_list;
-       /* neigh_list_lock protects: neigh_list and router */
+       /* neigh_list_lock protects: neigh_list, ifinfo_list,
+        * last_bonding_candidate and router
+        */
        spinlock_t neigh_list_lock;
        struct hlist_node hash_entry;
        struct batadv_priv *bat_priv;
@@ -1042,6 +1044,7 @@ struct batadv_bla_backbone_gw {
  * @addr: mac address of claimed non-mesh client
  * @vid: vlan id this client was detected on
  * @backbone_gw: pointer to backbone gw claiming this client
+ * @backbone_lock: lock protecting backbone_gw pointer
  * @lasttime: last time we heard of claim (locals only)
  * @hash_entry: hlist node for batadv_priv_bla::claim_hash
  * @refcount: number of contexts the object is used by
@@ -1051,6 +1054,7 @@ struct batadv_bla_claim {
        u8 addr[ETH_ALEN];
        unsigned short vid;
        struct batadv_bla_backbone_gw *backbone_gw;
+       spinlock_t backbone_lock; /* protects backbone_gw */
        unsigned long lasttime;
        struct hlist_node hash_entry;
        struct rcu_head rcu;
@@ -1137,11 +1141,13 @@ struct batadv_tt_change_node {
  * struct batadv_tt_req_node - data to keep track of the tt requests in flight
  * @addr: mac address of the originator this request was sent to
  * @issued_at: timestamp used for purging stale tt requests
+ * @refcount: number of contexts the object is used by
  * @list: list node for batadv_priv_tt::req_list
  */
 struct batadv_tt_req_node {
        u8 addr[ETH_ALEN];
        unsigned long issued_at;
+       struct kref refcount;
        struct hlist_node list;
 };
 
index 16079772222860977264133fa49bb0cf33b9b0d5..43d2cd862bc260f81c3ea51c1b3106984db45623 100644 (file)
@@ -213,8 +213,7 @@ drop:
 }
 EXPORT_SYMBOL_GPL(br_handle_frame_finish);
 
-/* note: already called with rcu_read_lock */
-static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static void __br_handle_local_finish(struct sk_buff *skb)
 {
        struct net_bridge_port *p = br_port_get_rcu(skb->dev);
        u16 vid = 0;
@@ -222,6 +221,14 @@ static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_bu
        /* check if vlan is allowed, to avoid spoofing */
        if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
                br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+}
+
+/* note: already called with rcu_read_lock */
+static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+       struct net_bridge_port *p = br_port_get_rcu(skb->dev);
+
+       __br_handle_local_finish(skb);
 
        BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
        br_pass_frame_up(skb);
@@ -274,7 +281,9 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
                        if (p->br->stp_enabled == BR_NO_STP ||
                            fwd_mask & (1u << dest[5]))
                                goto forward;
-                       break;
+                       *pskb = skb;
+                       __br_handle_local_finish(skb);
+                       return RX_HANDLER_PASS;
 
                case 0x01:      /* IEEE MAC (Pause) */
                        goto drop;
index 6852f3c7009c2b1cc2cbfa9e08b6b15104b4d12b..43844144c9c4f17a11252e3b1cc1ef4162d7ddb7 100644 (file)
@@ -464,8 +464,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
                               &ip6h->saddr)) {
                kfree_skb(skb);
+               br->has_ipv6_addr = 0;
                return NULL;
        }
+
+       br->has_ipv6_addr = 1;
        ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
 
        hopopt = (u8 *)(ip6h + 1);
@@ -1745,6 +1748,7 @@ void br_multicast_init(struct net_bridge *br)
        br->ip6_other_query.delay_time = 0;
        br->ip6_querier.port = NULL;
 #endif
+       br->has_ipv6_addr = 1;
 
        spin_lock_init(&br->multicast_lock);
        setup_timer(&br->multicast_router_timer,
index 2d25979273a6f57378da645460d9d6c2a0d91e5c..77e7f69bf80d4ca8e31e09b5b07230bca1abf170 100644 (file)
@@ -700,7 +700,7 @@ static int
 br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                  int (*output)(struct net *, struct sock *, struct sk_buff *))
 {
-       unsigned int mtu = ip_skb_dst_mtu(skb);
+       unsigned int mtu = ip_skb_dst_mtu(sk, skb);
        struct iphdr *iph = ip_hdr(skb);
 
        if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
index a5343c7232bfbf37ca95580a70a740950354e691..85e89f693589d1a7e712d9165cdac8147e0bf02c 100644 (file)
@@ -1273,7 +1273,7 @@ static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev,
                struct bridge_vlan_xstats vxi;
                struct br_vlan_stats stats;
 
-               if (vl_idx++ < *prividx)
+               if (++vl_idx < *prividx)
                        continue;
                memset(&vxi, 0, sizeof(vxi));
                vxi.vid = v->vid;
index c7fb5d7a7218c2c286160874fc30278108083ca6..52edecf3c294cde218e79e90bf86c7595578b062 100644 (file)
@@ -314,6 +314,7 @@ struct net_bridge
        u8                              multicast_disabled:1;
        u8                              multicast_querier:1;
        u8                              multicast_query_use_ifaddr:1;
+       u8                              has_ipv6_addr:1;
 
        u32                             hash_elasticity;
        u32                             hash_max;
@@ -588,10 +589,22 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
 
 static inline bool
 __br_multicast_querier_exists(struct net_bridge *br,
-                             struct bridge_mcast_other_query *querier)
+                               struct bridge_mcast_other_query *querier,
+                               const bool is_ipv6)
 {
+       bool own_querier_enabled;
+
+       if (br->multicast_querier) {
+               if (is_ipv6 && !br->has_ipv6_addr)
+                       own_querier_enabled = false;
+               else
+                       own_querier_enabled = true;
+       } else {
+               own_querier_enabled = false;
+       }
+
        return time_is_before_jiffies(querier->delay_time) &&
-              (br->multicast_querier || timer_pending(&querier->timer));
+              (own_querier_enabled || timer_pending(&querier->timer));
 }
 
 static inline bool br_multicast_querier_exists(struct net_bridge *br,
@@ -599,10 +612,12 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
 {
        switch (eth->h_proto) {
        case (htons(ETH_P_IP)):
-               return __br_multicast_querier_exists(br, &br->ip4_other_query);
+               return __br_multicast_querier_exists(br,
+                       &br->ip4_other_query, false);
 #if IS_ENABLED(CONFIG_IPV6)
        case (htons(ETH_P_IPV6)):
-               return __br_multicast_querier_exists(br, &br->ip6_other_query);
+               return __br_multicast_querier_exists(br,
+                       &br->ip6_other_query, true);
 #endif
        default:
                return false;
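
Boiled down, the bridge now only counts its own querier for IPv6 when it actually holds a usable IPv6 source address (has_ipv6_addr). A small truth-table sketch of that decision, with plain bools standing in for the bridge state; all names are illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool own_querier_enabled(bool multicast_querier, bool is_ipv6,
				bool has_ipv6_addr)
{
	if (!multicast_querier)
		return false;
	/* an IPv6 querier is only credible if the bridge could actually
	 * source a query, i.e. it holds a usable IPv6 address
	 */
	return !(is_ipv6 && !has_ipv6_addr);
}

int main(void)
{
	printf("%d\n", own_querier_enabled(true, true, false)); /* 0 */
	printf("%d\n", own_querier_enabled(true, true, true));  /* 1 */
	printf("%d\n", own_querier_enabled(true, false, false));/* 1 */
	return 0;
}
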
index 03062bb763b34cc64eb7c0b38c042801e0be0b1c..7e480bf75bcf5bfd69bd89cdccf29fc082a15e16 100644 (file)
@@ -1260,6 +1260,115 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
        return map;
 }
 
+/*
+ * Encoding order is (new_up_client, new_state, new_weight).  Need to
+ * apply in the (new_weight, new_state, new_up_client) order, because
+ * an incremental map may look like e.g.
+ *
+ *     new_up_client: { osd=6, addr=... } # set osd_state and addr
+ *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
+ */
+static int decode_new_up_state_weight(void **p, void *end,
+                                     struct ceph_osdmap *map)
+{
+       void *new_up_client;
+       void *new_state;
+       void *new_weight_end;
+       u32 len;
+
+       new_up_client = *p;
+       ceph_decode_32_safe(p, end, len, e_inval);
+       len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
+       ceph_decode_need(p, end, len, e_inval);
+       *p += len;
+
+       new_state = *p;
+       ceph_decode_32_safe(p, end, len, e_inval);
+       len *= sizeof(u32) + sizeof(u8);
+       ceph_decode_need(p, end, len, e_inval);
+       *p += len;
+
+       /* new_weight */
+       ceph_decode_32_safe(p, end, len, e_inval);
+       while (len--) {
+               s32 osd;
+               u32 w;
+
+               ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
+               osd = ceph_decode_32(p);
+               w = ceph_decode_32(p);
+               BUG_ON(osd >= map->max_osd);
+               pr_info("osd%d weight 0x%x %s\n", osd, w,
+                    w == CEPH_OSD_IN ? "(in)" :
+                    (w == CEPH_OSD_OUT ? "(out)" : ""));
+               map->osd_weight[osd] = w;
+
+               /*
+                * If we are marking in, set the EXISTS, and clear the
+                * AUTOOUT and NEW bits.
+                */
+               if (w) {
+                       map->osd_state[osd] |= CEPH_OSD_EXISTS;
+                       map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
+                                                CEPH_OSD_NEW);
+               }
+       }
+       new_weight_end = *p;
+
+       /* new_state (up/down) */
+       *p = new_state;
+       len = ceph_decode_32(p);
+       while (len--) {
+               s32 osd;
+               u8 xorstate;
+               int ret;
+
+               osd = ceph_decode_32(p);
+               xorstate = ceph_decode_8(p);
+               if (xorstate == 0)
+                       xorstate = CEPH_OSD_UP;
+               BUG_ON(osd >= map->max_osd);
+               if ((map->osd_state[osd] & CEPH_OSD_UP) &&
+                   (xorstate & CEPH_OSD_UP))
+                       pr_info("osd%d down\n", osd);
+               if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
+                   (xorstate & CEPH_OSD_EXISTS)) {
+                       pr_info("osd%d does not exist\n", osd);
+                       map->osd_weight[osd] = CEPH_OSD_IN;
+                       ret = set_primary_affinity(map, osd,
+                                                  CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
+                       if (ret)
+                               return ret;
+                       memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
+                       map->osd_state[osd] = 0;
+               } else {
+                       map->osd_state[osd] ^= xorstate;
+               }
+       }
+
+       /* new_up_client */
+       *p = new_up_client;
+       len = ceph_decode_32(p);
+       while (len--) {
+               s32 osd;
+               struct ceph_entity_addr addr;
+
+               osd = ceph_decode_32(p);
+               ceph_decode_copy(p, &addr, sizeof(addr));
+               ceph_decode_addr(&addr);
+               BUG_ON(osd >= map->max_osd);
+               pr_info("osd%d up\n", osd);
+               map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
+               map->osd_addr[osd] = addr;
+       }
+
+       *p = new_weight_end;
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
 /*
  * decode and apply an incremental map update.
  */
@@ -1358,49 +1467,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                        __remove_pg_pool(&map->pg_pools, pi);
        }
 
-       /* new_up */
-       ceph_decode_32_safe(p, end, len, e_inval);
-       while (len--) {
-               u32 osd;
-               struct ceph_entity_addr addr;
-               ceph_decode_32_safe(p, end, osd, e_inval);
-               ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
-               ceph_decode_addr(&addr);
-               pr_info("osd%d up\n", osd);
-               BUG_ON(osd >= map->max_osd);
-               map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS;
-               map->osd_addr[osd] = addr;
-       }
-
-       /* new_state */
-       ceph_decode_32_safe(p, end, len, e_inval);
-       while (len--) {
-               u32 osd;
-               u8 xorstate;
-               ceph_decode_32_safe(p, end, osd, e_inval);
-               xorstate = **(u8 **)p;
-               (*p)++;  /* clean flag */
-               if (xorstate == 0)
-                       xorstate = CEPH_OSD_UP;
-               if (xorstate & CEPH_OSD_UP)
-                       pr_info("osd%d down\n", osd);
-               if (osd < map->max_osd)
-                       map->osd_state[osd] ^= xorstate;
-       }
-
-       /* new_weight */
-       ceph_decode_32_safe(p, end, len, e_inval);
-       while (len--) {
-               u32 osd, off;
-               ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
-               osd = ceph_decode_32(p);
-               off = ceph_decode_32(p);
-               pr_info("osd%d weight 0x%x %s\n", osd, off,
-                    off == CEPH_OSD_IN ? "(in)" :
-                    (off == CEPH_OSD_OUT ? "(out)" : ""));
-               if (osd < map->max_osd)
-                       map->osd_weight[osd] = off;
-       }
+       /* new_up_client, new_state, new_weight */
+       err = decode_new_up_state_weight(p, end, map);
+       if (err)
+               goto bad;
 
        /* new_pg_temp */
        err = decode_new_pg_temp(p, end, map);
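
decode_new_up_state_weight() handles the wire-order/apply-order mismatch by remembering where each section starts, skipping ahead to validate lengths, then rewinding the cursor to process the sections in the safe order, and finally parking *p past all three. A hedged standalone sketch of that save-and-rewind cursor trick; the one-byte length prefix and all names are invented for illustration:

#include <stdio.h>

static void apply(const char *name, const unsigned char *sec)
{
	printf("applying %s (%d entries)\n", name, sec[0]);
}

int main(void)
{
	/* wire order: up_client, state, weight; one length byte each,
	 * followed by that many payload bytes
	 */
	unsigned char buf[] = { 2, 'a', 'b',  1, 'c',  3, 'd', 'e', 'f' };
	unsigned char *p = buf;
	unsigned char *up_client, *state, *weight, *end;

	up_client = p; p += 1 + p[0];   /* remember start, then skip */
	state     = p; p += 1 + p[0];
	weight    = p; p += 1 + p[0];
	end       = p;

	/* apply order differs from wire order, as in the hunk above */
	apply("weight", weight);
	apply("state", state);
	apply("up_client", up_client);

	p = end;   /* leave the cursor past all three sections */
	return 0;
}
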
index 68adb5f52110d85fead496a8a76a7248ae8cefae..e759d90e8cef037ec117a7e70941c3be050e3103 100644 (file)
 #include <net/sock_reuseport.h>
 
 /**
- *     sk_filter - run a packet through a socket filter
+ *     sk_filter_trim_cap - run a packet through a socket filter
  *     @sk: sock associated with &sk_buff
  *     @skb: buffer to filter
+ *     @cap: limit on how short the eBPF program may trim the packet
  *
 * Run the eBPF program and then cut skb->data to the correct size returned by
 * the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
@@ -64,7 +65,7 @@
  * be accepted or -EPERM if the packet should be tossed.
  *
  */
-int sk_filter(struct sock *sk, struct sk_buff *skb)
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
 {
        int err;
        struct sk_filter *filter;
@@ -85,14 +86,13 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
-
-               err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
+               err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
        }
        rcu_read_unlock();
 
        return err;
 }
-EXPORT_SYMBOL(sk_filter);
+EXPORT_SYMBOL(sk_filter_trim_cap);
 
 static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
@@ -2085,7 +2085,8 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 }
 
 static bool sk_filter_is_valid_access(int off, int size,
-                                     enum bpf_access_type type)
+                                     enum bpf_access_type type,
+                                     enum bpf_reg_type *reg_type)
 {
        switch (off) {
        case offsetof(struct __sk_buff, tc_classid):
@@ -2108,7 +2109,8 @@ static bool sk_filter_is_valid_access(int off, int size,
 }
 
 static bool tc_cls_act_is_valid_access(int off, int size,
-                                      enum bpf_access_type type)
+                                      enum bpf_access_type type,
+                                      enum bpf_reg_type *reg_type)
 {
        if (type == BPF_WRITE) {
                switch (off) {
@@ -2123,6 +2125,16 @@ static bool tc_cls_act_is_valid_access(int off, int size,
                        return false;
                }
        }
+
+       switch (off) {
+       case offsetof(struct __sk_buff, data):
+               *reg_type = PTR_TO_PACKET;
+               break;
+       case offsetof(struct __sk_buff, data_end):
+               *reg_type = PTR_TO_PACKET_END;
+               break;
+       }
+
        return __is_valid_access(off, size, type);
 }
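
sk_filter_trim_cap() lets the BPF program shrink the packet but never below the caller-supplied floor, which is how the DCCP hunks later in this patch keep dccph_doff * 4 bytes of header intact. A small sketch of the max(cap, pkt_len) trimming rule; the final clamp to the original length is an illustration detail (the kernel's pskb_trim simply ignores lengths beyond skb->len):

#include <stdio.h>

static unsigned int trim_len(unsigned int skb_len, unsigned int verdict,
			     unsigned int cap)
{
	if (verdict == 0)
		return 0;              /* drop the packet        */
	if (verdict > cap)
		cap = verdict;         /* max(cap, pkt_len)      */
	return cap < skb_len ? cap : skb_len;
}

int main(void)
{
	/* filter asks for 8 bytes, but the caller needs 20 (header) */
	printf("%u\n", trim_len(100, 8, 20));  /* 20 */
	printf("%u\n", trim_len(100, 64, 20)); /* 64 */
	return 0;
}
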
 
index a669dea146c61b2f7f5b1feaf46512b135ee28f7..61ad43f61c5edbffa48f4983d8bf1a7472a66cdc 100644 (file)
@@ -651,6 +651,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
 }
 EXPORT_SYMBOL(make_flow_keys_digest);
 
+static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
+
+u32 __skb_get_hash_symmetric(struct sk_buff *skb)
+{
+       struct flow_keys keys;
+
+       __flow_hash_secret_init();
+
+       memset(&keys, 0, sizeof(keys));
+       __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
+                          NULL, 0, 0, 0,
+                          FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+
+       return __flow_hash_from_keys(&keys, hashrnd);
+}
+EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
+
 /**
  * __skb_get_hash: calculate a flow hash
  * @skb: sk_buff to calculate flow hash from
@@ -868,6 +885,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
        },
 };
 
+static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
+       {
+               .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+               .offset = offsetof(struct flow_keys, control),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_BASIC,
+               .offset = offsetof(struct flow_keys, basic),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+               .offset = offsetof(struct flow_keys, addrs.v4addrs),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+               .offset = offsetof(struct flow_keys, addrs.v6addrs),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_PORTS,
+               .offset = offsetof(struct flow_keys, ports),
+       },
+};
+
 static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
        {
                .key_id = FLOW_DISSECTOR_KEY_CONTROL,
@@ -889,6 +929,9 @@ static int __init init_default_flow_dissectors(void)
        skb_flow_dissector_init(&flow_keys_dissector,
                                flow_keys_dissector_keys,
                                ARRAY_SIZE(flow_keys_dissector_keys));
+       skb_flow_dissector_init(&flow_keys_dissector_symmetric,
+                               flow_keys_dissector_symmetric_keys,
+                               ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
        skb_flow_dissector_init(&flow_keys_buf_dissector,
                                flow_keys_buf_dissector_keys,
                                ARRAY_SIZE(flow_keys_buf_dissector_keys));
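
The symmetric variant restricts hashing to addresses and ports; in the kernel the direction-independence additionally relies on canonicalising src/dst ordering before mixing. A hedged userspace sketch of that canonicalisation idea, with a stand-in mixer rather than the kernel's jhash-based __flow_hash_from_keys:

#include <stdint.h>
#include <stdio.h>

static uint32_t mix(uint32_t h, uint32_t v)
{
	h ^= v + 0x9e3779b9u + (h << 6) + (h >> 2);
	return h;
}

static uint32_t sym_hash(uint32_t saddr, uint32_t daddr,
			 uint16_t sport, uint16_t dport)
{
	/* order the endpoints so (a,b) and (b,a) hash identically */
	if (saddr > daddr || (saddr == daddr && sport > dport)) {
		uint32_t ta = saddr; saddr = daddr; daddr = ta;
		uint16_t tp = sport; sport = dport; dport = tp;
	}
	return mix(mix(mix(mix(0, saddr), daddr), sport), dport);
}

int main(void)
{
	/* both directions of the same flow land on the same value */
	printf("%08x\n", sym_hash(0x0a000001, 0x0a000002, 80, 5555));
	printf("%08x\n", sym_hash(0x0a000002, 0x0a000001, 5555, 80));
	return 0;
}
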
index 29dd8cc22bbf87a05d555ff16d62e6dee0b536d4..510cd62fcb9963f92591e03b6c4d40502f2f37bf 100644 (file)
@@ -2469,13 +2469,17 @@ int neigh_xmit(int index, struct net_device *dev,
                tbl = neigh_tables[index];
                if (!tbl)
                        goto out;
+               rcu_read_lock_bh();
                neigh = __neigh_lookup_noref(tbl, addr, dev);
                if (!neigh)
                        neigh = __neigh_create(tbl, addr, dev, false);
                err = PTR_ERR(neigh);
-               if (IS_ERR(neigh))
+               if (IS_ERR(neigh)) {
+                       rcu_read_unlock_bh();
                        goto out_kfree_skb;
+               }
                err = neigh->output(neigh, skb);
+               rcu_read_unlock_bh();
        }
        else if (index == NEIGH_LINK_TABLE) {
                err = dev_hard_header(skb, dev, ntohs(skb->protocol),
index f2b77e549c03a771909cd9c87c40ec2b7826cd31..eb12d2161fb2b5f9f26ebb8f534bbd6a673b65b1 100644 (file)
@@ -3015,24 +3015,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
 }
 EXPORT_SYMBOL_GPL(skb_append_pagefrags);
 
-/**
- *     skb_push_rcsum - push skb and update receive checksum
- *     @skb: buffer to update
- *     @len: length of data pulled
- *
- *     This function performs an skb_push on the packet and updates
- *     the CHECKSUM_COMPLETE checksum.  It should be used on
- *     receive path processing instead of skb_push unless you know
- *     that the checksum difference is zero (e.g., a valid IP header)
- *     or you are setting ip_summed to CHECKSUM_NONE.
- */
-static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
-{
-       skb_push(skb, len);
-       skb_postpush_rcsum(skb, skb->data, len);
-       return skb->data;
-}
-
 /**
  *     skb_pull_rcsum - pull skb and update receive checksum
  *     @skb: buffer to update
index 08bf97eceeb3827d0b237d8c01910e5e0a0f5d6a..25dab8b60223e25ee92dae45a226fce2a6bb5a03 100644 (file)
@@ -452,11 +452,12 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_queue_rcv_skb);
 
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+                    const int nested, unsigned int trim_cap)
 {
        int rc = NET_RX_SUCCESS;
 
-       if (sk_filter(sk, skb))
+       if (sk_filter_trim_cap(sk, skb, trim_cap))
                goto discard_and_relse;
 
        skb->dev = NULL;
@@ -492,7 +493,7 @@ discard_and_relse:
        kfree_skb(skb);
        goto out;
 }
-EXPORT_SYMBOL(sk_receive_skb);
+EXPORT_SYMBOL(__sk_receive_skb);
 
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 {
@@ -1938,6 +1939,10 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
                sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
                sockc->tsflags |= tsflags;
                break;
+       /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
+       case SCM_RIGHTS:
+       case SCM_CREDENTIALS:
+               break;
        default:
                return -EINVAL;
        }
index 5c7e413a3ae407e67565b48a8bd6f43e3b02de4d..345a3aeb8c7e36449a765298cd6512eab8cfef4b 100644 (file)
@@ -462,7 +462,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
        security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
        rt = ip_route_output_flow(net, &fl4, sk);
        if (IS_ERR(rt)) {
-               __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+               IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
                return NULL;
        }
 
@@ -527,17 +527,19 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
                                                                 rxiph->daddr);
        skb_dst_set(skb, dst_clone(dst));
 
+       local_bh_disable();
        bh_lock_sock(ctl_sk);
        err = ip_build_and_send_pkt(skb, ctl_sk,
                                    rxiph->daddr, rxiph->saddr, NULL);
        bh_unlock_sock(ctl_sk);
 
        if (net_xmit_eval(err) == 0) {
-               DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
-               DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
+               __DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+               __DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
        }
+       local_bh_enable();
 out:
-        dst_release(dst);
+       dst_release(dst);
 }
 
 static void dccp_v4_reqsk_destructor(struct request_sock *req)
@@ -866,7 +868,7 @@ lookup:
                goto discard_and_relse;
        nf_reset(skb);
 
-       return sk_receive_skb(sk, skb, 1);
+       return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);
 
 no_dccp_socket:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
index d176f4e66369a399f5fe8a440eb864dbac9c7542..3ff137d9471d8420748fd72c8631d67eb57ffb66 100644 (file)
@@ -732,7 +732,7 @@ lookup:
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
 
-       return sk_receive_skb(sk, skb, 1) ? -1 : 0;
+       return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;
 
 no_dccp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
index df48034378889eac83c6b262a76a32028202efb4..a796fc7cbc3542972eaa19bbc3d3dc07a428ff1c 100644 (file)
@@ -41,6 +41,7 @@
 #include <net/dn_fib.h>
 #include <net/dn_neigh.h>
 #include <net/dn_dev.h>
+#include <net/nexthop.h>
 
 #define RT_MIN_TABLE 1
 
@@ -150,14 +151,13 @@ static int dn_fib_count_nhs(const struct nlattr *attr)
        struct rtnexthop *nhp = nla_data(attr);
        int nhs = 0, nhlen = nla_len(attr);
 
-       while(nhlen >= (int)sizeof(struct rtnexthop)) {
-               if ((nhlen -= nhp->rtnh_len) < 0)
-                       return 0;
+       while (rtnh_ok(nhp, nhlen)) {
                nhs++;
-               nhp = RTNH_NEXT(nhp);
+               nhp = rtnh_next(nhp, &nhlen);
        }
 
-       return nhs;
+       /* leftover implies invalid nexthop configuration, discard it */
+       return nhlen > 0 ? 0 : nhs;
 }
 
 static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
@@ -167,21 +167,24 @@ static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
        int nhlen = nla_len(attr);
 
        change_nexthops(fi) {
-               int attrlen = nhlen - sizeof(struct rtnexthop);
-               if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
+               int attrlen;
+
+               if (!rtnh_ok(nhp, nhlen))
                        return -EINVAL;
 
                nh->nh_flags  = (r->rtm_flags&~0xFF) | nhp->rtnh_flags;
                nh->nh_oif    = nhp->rtnh_ifindex;
                nh->nh_weight = nhp->rtnh_hops + 1;
 
-               if (attrlen) {
+               attrlen = rtnh_attrlen(nhp);
+               if (attrlen > 0) {
                        struct nlattr *gw_attr;
 
                        gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
                        nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0;
                }
-               nhp = RTNH_NEXT(nhp);
+
+               nhp = rtnh_next(nhp, &nhlen);
        } endfor_nexthops(fi);
 
        return 0;
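
rtnh_ok()/rtnh_next() turn the old hand-rolled length bookkeeping into a bounded walk over self-sized records, and any leftover bytes mark the whole configuration as malformed. A standalone sketch of the same pattern; the two-byte record layout and all names are invented for illustration:

#include <stdio.h>

struct rec {
	unsigned char len;    /* total record length, header included */
	unsigned char type;
};

static int count_recs(const unsigned char *buf, int buflen)
{
	int n = 0;

	while (buflen >= (int)sizeof(struct rec)) {
		const struct rec *r = (const struct rec *)buf;

		/* counterpart of rtnh_ok(): reject records that are
		 * shorter than a header or overrun the buffer
		 */
		if (r->len < sizeof(struct rec) || r->len > buflen)
			return 0;
		n++;
		buf += r->len;
		buflen -= r->len;
	}
	/* leftover bytes imply a malformed config; discard it all */
	return buflen > 0 ? 0 : n;
}

int main(void)
{
	unsigned char good[] = { 4, 0, 0, 0,  2, 0,  3, 0, 0 };
	unsigned char bad[]  = { 4, 0, 0, 0,  5, 0 };   /* overruns */

	printf("%d %d\n", count_recs(good, sizeof(good)),
	       count_recs(bad, sizeof(bad)));           /* 3 0 */
	return 0;
}
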
index 477937465a202ccdf458a808776ea3e90c853b84..d95631d0924899f17549e69801567bed076491b9 100644 (file)
@@ -23,6 +23,11 @@ struct esp_skb_cb {
        void *tmp;
 };
 
+struct esp_output_extra {
+       __be32 seqhi;
+       u32 esphoff;
+};
+
 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
 
 static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
@@ -35,11 +40,11 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
  *
  * TODO: Use spare space in skb for this where possible.
  */
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
 {
        unsigned int len;
 
-       len = seqhilen;
+       len = extralen;
 
        len += crypto_aead_ivsize(aead);
 
@@ -57,15 +62,16 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
        return kmalloc(len, GFP_ATOMIC);
 }
 
-static inline __be32 *esp_tmp_seqhi(void *tmp)
+static inline void *esp_tmp_extra(void *tmp)
 {
-       return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
+       return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
 }
-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
 {
        return crypto_aead_ivsize(aead) ?
-              PTR_ALIGN((u8 *)tmp + seqhilen,
-                        crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
+              PTR_ALIGN((u8 *)tmp + extralen,
+                        crypto_aead_alignmask(aead) + 1) : tmp + extralen;
 }
 
 static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
@@ -99,7 +105,7 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
 {
        struct ip_esp_hdr *esph = (void *)(skb->data + offset);
        void *tmp = ESP_SKB_CB(skb)->tmp;
-       __be32 *seqhi = esp_tmp_seqhi(tmp);
+       __be32 *seqhi = esp_tmp_extra(tmp);
 
        esph->seq_no = esph->spi;
        esph->spi = *seqhi;
@@ -107,7 +113,11 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
 
 static void esp_output_restore_header(struct sk_buff *skb)
 {
-       esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
+       void *tmp = ESP_SKB_CB(skb)->tmp;
+       struct esp_output_extra *extra = esp_tmp_extra(tmp);
+
+       esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
+                               sizeof(__be32));
 }
 
 static void esp_output_done_esn(struct crypto_async_request *base, int err)
@@ -121,6 +131,7 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err;
+       struct esp_output_extra *extra;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct aead_request *req;
@@ -137,8 +148,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        int tfclen;
        int nfrags;
        int assoclen;
-       int seqhilen;
-       __be32 *seqhi;
+       int extralen;
        __be64 seqno;
 
        /* skb is pure payload to encrypt */
@@ -166,21 +176,21 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        nfrags = err;
 
        assoclen = sizeof(*esph);
-       seqhilen = 0;
+       extralen = 0;
 
        if (x->props.flags & XFRM_STATE_ESN) {
-               seqhilen += sizeof(__be32);
-               assoclen += seqhilen;
+               extralen += sizeof(*extra);
+               assoclen += sizeof(__be32);
        }
 
-       tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
+       tmp = esp_alloc_tmp(aead, nfrags, extralen);
        if (!tmp) {
                err = -ENOMEM;
                goto error;
        }
 
-       seqhi = esp_tmp_seqhi(tmp);
-       iv = esp_tmp_iv(aead, tmp, seqhilen);
+       extra = esp_tmp_extra(tmp);
+       iv = esp_tmp_iv(aead, tmp, extralen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);
 
@@ -247,8 +257,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
         * encryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
-               esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
-               *seqhi = esph->spi;
+               extra->esphoff = (unsigned char *)esph -
+                                skb_transport_header(skb);
+               esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
+               extra->seqhi = esph->spi;
                esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
                aead_request_set_callback(req, 0, esp_output_done_esn, skb);
        }
@@ -445,7 +457,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
                goto out;
 
        ESP_SKB_CB(skb)->tmp = tmp;
-       seqhi = esp_tmp_seqhi(tmp);
+       seqhi = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);
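
The new esp_output_extra carries both the ESN high bits and the offset of the real ESP header, and it lives in the single scratch allocation esp_alloc_tmp() returns; esp_tmp_extra() and esp_tmp_iv() carve that buffer with alignment round-ups (extra, then IV, then the aead request and scatterlist). A userspace sketch of the carving, with illustrative alignment values rather than the real crypto_aead ones:

    /* One allocation, several aligned sub-objects, in the spirit of
     * esp_tmp_extra()/esp_tmp_iv(). */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ALIGN_PTR(p, a) \
        ((void *)(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1)))

    struct extra { uint32_t seqhi; uint32_t esphoff; };

    int main(void)
    {
        size_t ivlen = 12, align = 16;  /* illustrative values */
        unsigned char *tmp = malloc(sizeof(struct extra) + 2 * align + ivlen + 64);
        struct extra *ex;
        unsigned char *iv;

        if (!tmp)
            return 1;
        ex = ALIGN_PTR(tmp, _Alignof(struct extra));      /* esp_tmp_extra() */
        iv = ALIGN_PTR((unsigned char *)(ex + 1), align); /* esp_tmp_iv() */
        printf("extra=%p iv=%p\n", (void *)ex, (void *)iv);
        free(tmp);
        return 0;
    }
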
index d09173bf95005be87d4d0cbcc754147b3c8e62c9..539fa264e67d71148364c9fc0e694c78fd35e69b 100644 (file)
@@ -479,6 +479,9 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                if (!rtnh_ok(rtnh, remaining))
                        return -EINVAL;
 
+               if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+                       return -EINVAL;
+
                nexthop_nh->nh_flags =
                        (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
                nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
@@ -1003,6 +1006,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
                goto err_inval;
 
+       if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+               goto err_inval;
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (cfg->fc_mp) {
                nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
index 4c39f4fd332a3569267c5c1330e692c5f40a49e7..de1d119a4497e7d512cba30043f01bb6609f6f2d 100644 (file)
@@ -62,26 +62,26 @@ EXPORT_SYMBOL_GPL(gre_del_protocol);
 
 /* Fills in tpi and returns header length to be pulled. */
 int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-                    bool *csum_err, __be16 proto)
+                    bool *csum_err, __be16 proto, int nhs)
 {
        const struct gre_base_hdr *greh;
        __be32 *options;
        int hdr_len;
 
-       if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+       if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr))))
                return -EINVAL;
 
-       greh = (struct gre_base_hdr *)skb_transport_header(skb);
+       greh = (struct gre_base_hdr *)(skb->data + nhs);
        if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
                return -EINVAL;
 
        tpi->flags = gre_flags_to_tnl_flags(greh->flags);
        hdr_len = gre_calc_hlen(tpi->flags);
 
-       if (!pskb_may_pull(skb, hdr_len))
+       if (!pskb_may_pull(skb, nhs + hdr_len))
                return -EINVAL;
 
-       greh = (struct gre_base_hdr *)skb_transport_header(skb);
+       greh = (struct gre_base_hdr *)(skb->data + nhs);
        tpi->proto = greh->protocol;
 
        options = (__be32 *)(greh + 1);
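
gre_parse_header() now takes the caller's network-header offset and bounds-checks nhs plus the header size on every pull, because gre_err() runs on an ICMP error payload where skb_transport_header() has not been set up. The pattern, modeled in plain C with a hypothetical struct rather than the kernel's:

    /* Always bounds-check offset + header size against the available
     * bytes before casting, like pskb_may_pull(skb, nhs + len). */
    #include <stdio.h>

    struct gre_base { unsigned short flags, protocol; };

    static const struct gre_base *parse(const unsigned char *data,
                                        size_t len, size_t nhs)
    {
        if (len < nhs + sizeof(struct gre_base))    /* may_pull check */
            return NULL;
        return (const struct gre_base *)(data + nhs);
    }

    int main(void)
    {
        unsigned char pkt[64] = { 0 };

        printf("ok=%d\n", parse(pkt, sizeof(pkt), 20) != NULL);
        printf("short=%d\n", parse(pkt, 21, 20) != NULL);
        return 0;
    }
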
index 4d2025f7ec578b7e3f3fd342e82e734d36395068..1d000af7f5617e32e6dd8ec5e034a87fd449ab06 100644 (file)
 #include <net/gre.h>
 #include <net/dst_metadata.h>
 
-#if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/ip6_fib.h>
-#include <net/ip6_route.h>
-#endif
-
 /*
    Problems & solutions
    --------------------
@@ -217,12 +211,14 @@ static void gre_err(struct sk_buff *skb, u32 info)
         * by themselves???
         */
 
+       const struct iphdr *iph = (struct iphdr *)skb->data;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        struct tnl_ptk_info tpi;
        bool csum_err = false;
 
-       if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) {
+       if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
+                            iph->ihl * 4) < 0) {
                if (!csum_err)          /* ignore csum errors. */
                        return;
        }
@@ -338,7 +334,7 @@ static int gre_rcv(struct sk_buff *skb)
        }
 #endif
 
-       hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP));
+       hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
        if (hdr_len < 0)
                goto drop;
 
@@ -1121,6 +1117,7 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
 {
        struct nlattr *tb[IFLA_MAX + 1];
        struct net_device *dev;
+       LIST_HEAD(list_kill);
        struct ip_tunnel *t;
        int err;
 
@@ -1136,8 +1133,10 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
        t->collect_md = true;
 
        err = ipgre_newlink(net, dev, tb, NULL);
-       if (err < 0)
-               goto out;
+       if (err < 0) {
+               free_netdev(dev);
+               return ERR_PTR(err);
+       }
 
        /* openvswitch users expect packet sizes to be unrestricted,
         * so set the largest MTU we can.
@@ -1146,9 +1145,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
        if (err)
                goto out;
 
+       err = rtnl_configure_link(dev, NULL);
+       if (err < 0)
+               goto out;
+
        return dev;
 out:
-       free_netdev(dev);
+       ip_tunnel_dellink(dev, &list_kill);
+       unregister_netdevice_many(&list_kill);
        return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
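
The gretap_fb_dev_create() fix separates two error disciplines: before ipgre_newlink() succeeds, the half-built device is released with plain free_netdev(); once the device is registered, every later failure (including the new rtnl_configure_link() step) must tear it down through ip_tunnel_dellink() plus unregister_netdevice_many(). A toy model of that split, with hypothetical helpers:

    /* Before a resource is published, free it directly; after it is
     * published, go through the full unpublish path. */
    #include <stdio.h>
    #include <stdlib.h>

    struct dev { int registered; };

    static void dev_free(struct dev *d)       { free(d); }
    static void dev_unregister(struct dev *d) { d->registered = 0; dev_free(d); }

    static struct dev *create(int fail_early, int fail_late)
    {
        struct dev *d = calloc(1, sizeof(*d));

        if (!d)
            return NULL;
        if (fail_early) {           /* like ipgre_newlink() failing */
            dev_free(d);            /* not registered yet: plain free */
            return NULL;
        }
        d->registered = 1;
        if (fail_late) {            /* like rtnl_configure_link() failing */
            dev_unregister(d);      /* registered: full teardown */
            return NULL;
        }
        return d;
    }

    int main(void)
    {
        struct dev *d = create(0, 0);

        printf("created=%d\n", d != NULL);
        if (d)
            dev_unregister(d);
        return 0;
    }
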
index 124bf0a663283502deb03397343160d493a378b1..4bd4921639c3e6415f8899896f72fe1564a68c55 100644 (file)
@@ -271,7 +271,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk
                return dst_output(net, sk, skb);
        }
 #endif
-       mtu = ip_skb_dst_mtu(skb);
+       mtu = ip_skb_dst_mtu(sk, skb);
        if (skb_is_gso(skb))
                return ip_finish_output_gso(net, sk, skb, mtu);
 
@@ -541,7 +541,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 
        iph = ip_hdr(skb);
 
-       mtu = ip_skb_dst_mtu(skb);
+       mtu = ip_skb_dst_mtu(sk, skb);
        if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
                mtu = IPCB(skb)->frag_max_size;
 
index 2ed9dd2b5f2f5a6ea3ea52208e5ec1424a501fc5..1d71c40eaaf35a74b1cec69d3ffdbd80b675aae9 100644 (file)
@@ -127,7 +127,9 @@ __be32 ic_myaddr = NONE;            /* My IP address */
 static __be32 ic_netmask = NONE;       /* Netmask for local subnet */
 __be32 ic_gateway = NONE;      /* Gateway IP address */
 
-__be32 ic_addrservaddr = NONE; /* IP Address of the IP addresses'server */
+#ifdef IPCONFIG_DYNAMIC
+static __be32 ic_addrservaddr = NONE;  /* IP Address of the IP addresses'server */
+#endif
 
 __be32 ic_servaddr = NONE;     /* Boot server IP address */
 
index 21a38e296fe2da2c4a095c94ab204362d9eacc89..5ad48ec7771007a0c01c8210001ecb17ab012863 100644 (file)
@@ -891,8 +891,10 @@ static struct mfc_cache *ipmr_cache_alloc(void)
 {
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
 
-       if (c)
+       if (c) {
+               c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
                c->mfc_un.res.minvif = MAXVIFS;
+       }
        return c;
 }
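
Seeding last_assert to jiffies - MFC_ASSERT_THRESH - 1 makes the timestamp look already expired, so the very first assert after cache allocation is no longer suppressed by a zero stamp; the ip6mr hunk below applies the same fix. This depends on wraparound-safe time comparisons, sketched here in userspace:

    /* A rate-limit timestamp seeded as "already expired": with
     * wrap-safe comparisons, jiffies - THRESH - 1 lets the first
     * event pass immediately. */
    #include <stdio.h>

    #define THRESH 300u

    static int time_after_eq(unsigned long a, unsigned long b)
    {
        return (long)(a - b) >= 0;  /* wrap-safe, like the kernel macro */
    }

    int main(void)
    {
        unsigned long jiffies = 5;  /* small value right after boot */
        unsigned long last = jiffies - THRESH - 1;  /* wraps; that is fine */

        /* first assert: allowed immediately instead of after THRESH */
        printf("allowed=%d\n", time_after_eq(jiffies, last + THRESH));
        return 0;
    }
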
 
index d6c8f4cd080001a527f7c137021cc6a3f3604344..42bf89aaf6a5206cf384068a0a50a3130abe2a4e 100644 (file)
@@ -87,7 +87,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
 /* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;
 
 int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
@@ -3421,6 +3421,23 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
        return flag;
 }
 
+static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
+                                  u32 *last_oow_ack_time)
+{
+       if (*last_oow_ack_time) {
+               s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+               if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+                       NET_INC_STATS(net, mib_idx);
+                       return true;    /* rate-limited: don't send yet! */
+               }
+       }
+
+       *last_oow_ack_time = tcp_time_stamp;
+
+       return false;   /* not rate-limited: go ahead, send dupack now! */
+}
+
 /* Return true if we're currently rate-limiting out-of-window ACKs and
  * thus shouldn't send a dupack right now. We rate-limit dupacks in
  * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
@@ -3434,21 +3451,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
        /* Data packets without SYNs are not likely part of an ACK loop. */
        if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
            !tcp_hdr(skb)->syn)
-               goto not_rate_limited;
-
-       if (*last_oow_ack_time) {
-               s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
-
-               if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
-                       NET_INC_STATS(net, mib_idx);
-                       return true;    /* rate-limited: don't send yet! */
-               }
-       }
-
-       *last_oow_ack_time = tcp_time_stamp;
+               return false;
 
-not_rate_limited:
-       return false;   /* not rate-limited: go ahead, send dupack now! */
+       return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
 }
 
 /* RFC 5961 7 [ACK Throttling] */
@@ -3458,21 +3463,26 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
        static u32 challenge_timestamp;
        static unsigned int challenge_count;
        struct tcp_sock *tp = tcp_sk(sk);
-       u32 now;
+       u32 count, now;
 
        /* First check our per-socket dupack rate limit. */
-       if (tcp_oow_rate_limited(sock_net(sk), skb,
-                                LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
-                                &tp->last_oow_ack_time))
+       if (__tcp_oow_rate_limited(sock_net(sk),
+                                  LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+                                  &tp->last_oow_ack_time))
                return;
 
-       /* Then check the check host-wide RFC 5961 rate limit. */
+       /* Then check host-wide RFC 5961 rate limit. */
        now = jiffies / HZ;
        if (now != challenge_timestamp) {
+               u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
                challenge_timestamp = now;
-               challenge_count = 0;
+               WRITE_ONCE(challenge_count, half +
+                          prandom_u32_max(sysctl_tcp_challenge_ack_limit));
        }
-       if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+       count = READ_ONCE(challenge_count);
+       if (count > 0) {
+               WRITE_ONCE(challenge_count, count - 1);
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
                tcp_send_ack(sk);
        }
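
This is the RFC 5961 challenge-ACK hardening (CVE-2016-5696): the per-second budget is raised to 1000 and, more importantly, randomized to half + prandom_u32_max(limit) and accessed via READ_ONCE()/WRITE_ONCE(), so an off-path attacker can no longer count challenge ACKs against a predictable shared counter to infer another connection's state. A userspace model of the randomized budget, with rand() standing in for prandom_u32_max():

    /* Each second the budget resets to limit/2 + random(limit)
     * instead of a fixed count. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    static unsigned int limit = 1000;
    static unsigned int budget;
    static time_t stamp = (time_t)-1;

    static int challenge_ack_allowed(time_t now)
    {
        if (now != stamp) {
            stamp = now;
            budget = limit / 2 + ((unsigned int)rand() % limit);
        }
        if (budget > 0) {
            budget--;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        unsigned int sent = 0;
        int i;

        srand((unsigned int)time(NULL));
        for (i = 0; i < 5000; i++)
            sent += (unsigned int)challenge_ack_allowed(time(NULL));
        printf("challenge ACKs allowed this second: %u\n", sent);
        return 0;
    }
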
index 8bd9911fdd163ab739d2dfda34e053b5e8e0bc03..e00e972c4e6a750d4d01bce3c1bc6125eb49f102 100644 (file)
@@ -2751,7 +2751,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        struct sk_buff *hole = NULL;
-       u32 last_lost;
+       u32 max_segs, last_lost;
        int mib_idx;
        int fwd_rexmitting = 0;
 
@@ -2771,6 +2771,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                last_lost = tp->snd_una;
        }
 
+       max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
        tcp_for_write_queue_from(skb, sk) {
                __u8 sacked = TCP_SKB_CB(skb)->sacked;
                int segs;
@@ -2784,6 +2785,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
                if (segs <= 0)
                        return;
+               /* In case tcp_shift_skb_data() has aggregated large skbs,
+                * we need to make sure we do not send too big TSO packets
+                */
+               segs = min_t(int, segs, max_segs);
 
                if (fwd_rexmitting) {
 begin_fwd:
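
The clamp above stops retransmissions from bursting arbitrarily many segments just because cwnd has room: segs is bounded by the pacing-derived tcp_tso_autosize() value computed once per call. In miniature, with illustrative numbers:

    /* Window room alone no longer sets the burst size. */
    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        int cwnd = 40, in_flight = 10;
        int max_segs = 8;               /* like tcp_tso_autosize() */
        int segs = cwnd - in_flight;    /* room in the window: 30 */

        segs = min_int(segs, max_segs); /* but burst no more than 8 */
        printf("segs=%d\n", segs);
        return 0;
    }
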
index 0ff31d97d48586af00c6973d123576e040ed422a..4aed8fc23d328592f8cf267fca70582f62fc6a3e 100644 (file)
@@ -391,9 +391,9 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum)
        return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
 }
 
-static inline int compute_score(struct sock *sk, struct net *net,
-                               __be32 saddr, unsigned short hnum, __be16 sport,
-                               __be32 daddr, __be16 dport, int dif)
+static int compute_score(struct sock *sk, struct net *net,
+                        __be32 saddr, __be16 sport,
+                        __be32 daddr, unsigned short hnum, int dif)
 {
        int score;
        struct inet_sock *inet;
@@ -434,52 +434,6 @@ static inline int compute_score(struct sock *sk, struct net *net,
        return score;
 }
 
-/*
- * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
- */
-static inline int compute_score2(struct sock *sk, struct net *net,
-                                __be32 saddr, __be16 sport,
-                                __be32 daddr, unsigned int hnum, int dif)
-{
-       int score;
-       struct inet_sock *inet;
-
-       if (!net_eq(sock_net(sk), net) ||
-           ipv6_only_sock(sk))
-               return -1;
-
-       inet = inet_sk(sk);
-
-       if (inet->inet_rcv_saddr != daddr ||
-           inet->inet_num != hnum)
-               return -1;
-
-       score = (sk->sk_family == PF_INET) ? 2 : 1;
-
-       if (inet->inet_daddr) {
-               if (inet->inet_daddr != saddr)
-                       return -1;
-               score += 4;
-       }
-
-       if (inet->inet_dport) {
-               if (inet->inet_dport != sport)
-                       return -1;
-               score += 4;
-       }
-
-       if (sk->sk_bound_dev_if) {
-               if (sk->sk_bound_dev_if != dif)
-                       return -1;
-               score += 4;
-       }
-
-       if (sk->sk_incoming_cpu == raw_smp_processor_id())
-               score++;
-
-       return score;
-}
-
 static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
                       const __u16 lport, const __be32 faddr,
                       const __be16 fport)
@@ -492,11 +446,11 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
                              udp_ehash_secret + net_hash_mix(net));
 }
 
-/* called with read_rcu_lock() */
+/* called with rcu_read_lock() */
 static struct sock *udp4_lib_lookup2(struct net *net,
                __be32 saddr, __be16 sport,
                __be32 daddr, unsigned int hnum, int dif,
-               struct udp_hslot *hslot2, unsigned int slot2,
+               struct udp_hslot *hslot2,
                struct sk_buff *skb)
 {
        struct sock *sk, *result;
@@ -506,7 +460,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
        result = NULL;
        badness = 0;
        udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
-               score = compute_score2(sk, net, saddr, sport,
+               score = compute_score(sk, net, saddr, sport,
                                      daddr, hnum, dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
@@ -554,17 +508,22 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 
                result = udp4_lib_lookup2(net, saddr, sport,
                                          daddr, hnum, dif,
-                                         hslot2, slot2, skb);
+                                         hslot2, skb);
                if (!result) {
+                       unsigned int old_slot2 = slot2;
                        hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
                        slot2 = hash2 & udptable->mask;
+                       /* avoid searching the same slot again. */
+                       if (unlikely(slot2 == old_slot2))
+                               return result;
+
                        hslot2 = &udptable->hash2[slot2];
                        if (hslot->count < hslot2->count)
                                goto begin;
 
                        result = udp4_lib_lookup2(net, saddr, sport,
-                                                 htonl(INADDR_ANY), hnum, dif,
-                                                 hslot2, slot2, skb);
+                                                 daddr, hnum, dif,
+                                                 hslot2, skb);
                }
                return result;
        }
@@ -572,8 +531,8 @@ begin:
        result = NULL;
        badness = 0;
        sk_for_each_rcu(sk, &hslot->head) {
-               score = compute_score(sk, net, saddr, hnum, sport,
-                                     daddr, dport, dif);
+               score = compute_score(sk, net, saddr, sport,
+                                     daddr, hnum, dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
@@ -1624,6 +1583,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        if (sk_filter(sk, skb))
                goto drop;
+       if (unlikely(skb->len < sizeof(struct udphdr)))
+               goto drop;
 
        udp_csum_pull_header(skb);
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
@@ -1755,8 +1716,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
                        return err;
        }
 
-       return skb_checksum_init_zero_check(skb, proto, uh->check,
-                                           inet_compute_pseudo);
+       /* Note, we are only interested in != 0 or == 0, thus the
+        * force to int.
+        */
+       return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+                                                        inet_compute_pseudo);
 }
 
 /*
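
The duplicated compute_score()/compute_score2() pair collapses into one scorer keyed on (saddr, sport, daddr, hnum, dif); the wildcard second pass now keeps the real daddr and skips rescanning an identical slot, and receive gains a post-filter length check. The IPv6 hunks below get the same treatment. The scoring idea, sketched with a simplified 4-tuple rather than the real inet_sock fields:

    /* Each bound socket earns points per exact attribute match and is
     * rejected outright on a mismatch; the highest scorer wins. */
    #include <stdio.h>

    struct bind { unsigned int saddr, daddr; unsigned short sport, hnum; };

    static int compute_score(const struct bind *b, unsigned int saddr,
                             unsigned short sport, unsigned int daddr,
                             unsigned short hnum)
    {
        int score = 0;

        if (b->hnum != hnum)
            return -1;                  /* wrong local port: no match */
        if (b->daddr) {                 /* bound to a local address? */
            if (b->daddr != daddr)
                return -1;
            score += 4;
        }
        if (b->saddr) {                 /* connected to a peer address? */
            if (b->saddr != saddr)
                return -1;
            score += 4;
        }
        if (b->sport) {                 /* connected to a peer port? */
            if (b->sport != sport)
                return -1;
            score += 4;
        }
        return score;
    }

    int main(void)
    {
        struct bind any = { 0, 0, 0, 53 }, exact = { 0, 0x7f000001, 0, 53 };

        printf("wildcard=%d exact=%d -> exact wins\n",
               compute_score(&any, 1, 1000, 0x7f000001, 53),
               compute_score(&exact, 1, 1000, 0x7f000001, 53));
        return 0;
    }
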
index 4527285fcaa2c2c8134c089b88e0cfeeeddad292..a4fa840769690a271b259aee1630fb061bf0ae92 100644 (file)
@@ -98,7 +98,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        if (!(type & ICMPV6_INFOMSG_MASK))
                if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
-                       ping_err(skb, offset, info);
+                       ping_err(skb, offset, ntohl(info));
 }
 
 static int icmpv6_rcv(struct sk_buff *skb);
index b2025bf3da4af20a19b61de59808d3ca91eead51..c0cbcb259f5a9acf9157d4626c61f532d8077855 100644 (file)
@@ -78,9 +78,12 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
         * we accept a checksum of zero here. When we find the socket
         * for the UDP packet we'll check if that socket allows zero checksum
         * for IPv6 (set by socket option).
+        *
+        * Note, we are only interested in != 0 or == 0, thus the
+        * force to int.
         */
-       return skb_checksum_init_zero_check(skb, proto, uh->check,
-                                          ip6_compute_pseudo);
+       return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+                                                        ip6_compute_pseudo);
 }
 EXPORT_SYMBOL(udp6_csum_init);
 
index 1bcef2369d64e6f1325dcab50c14601e6ca5a40a..771be1fa41764aa8ea3b570a058ee84b109903b9 100644 (file)
@@ -177,6 +177,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
                }
        }
 
+       free_percpu(non_pcpu_rt->rt6i_pcpu);
        non_pcpu_rt->rt6i_pcpu = NULL;
 }
 
index fdc9de276ab1aeecfbc917e8718953d0aa0691ee..776d145113e138872f45d97e7f66ff0416762d85 100644 (file)
@@ -468,7 +468,7 @@ static int gre_rcv(struct sk_buff *skb)
        bool csum_err = false;
        int hdr_len;
 
-       hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6));
+       hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
        if (hdr_len < 0)
                goto drop;
 
index f2e2013f834621fe2598d9dd9b452b83cd4f9018..487ef3bc7bbcde7f3509b76b3a68aee9d2ab7e8b 100644 (file)
@@ -1074,6 +1074,7 @@ static struct mfc6_cache *ip6mr_cache_alloc(void)
        struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
        if (!c)
                return NULL;
+       c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
        c->mfc_un.res.minvif = MAXMIFS;
        return c;
 }
index 969913da494fdf1d80ce674c3b6c421fdab18d3d..520b7884d0c2b55ab6f5fe4b0e0c86fcad8aab08 100644 (file)
@@ -1782,7 +1782,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
        };
        struct fib6_table *table;
        struct rt6_info *rt;
-       int flags = 0;
+       int flags = RT6_LOOKUP_F_IFACE;
 
        table = fib6_get_table(net, cfg->fc_table);
        if (!table)
index 0a5a255277e562ecb11ab703d964143f21bf85cd..0619ac70836d4ea5ed4e43a1f5c3ac1f870979c4 100644 (file)
@@ -560,13 +560,13 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
 
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-                                t->parms.link, 0, IPPROTO_IPV6, 0);
+                                t->parms.link, 0, iph->protocol, 0);
                err = 0;
                goto out;
        }
        if (type == ICMP_REDIRECT) {
                ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
-                             IPPROTO_IPV6, 0);
+                             iph->protocol, 0);
                err = 0;
                goto out;
        }
index f36c2d076fce0df30d202d7683a67e3614d77fe9..2255d2bf5f6bdb532c51eff6792b3024b3eeefa1 100644 (file)
@@ -738,7 +738,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
                                 u32 ack, u32 win, u32 tsval, u32 tsecr,
                                 int oif, struct tcp_md5sig_key *key, int rst,
-                                u8 tclass, u32 label)
+                                u8 tclass, __be32 label)
 {
        const struct tcphdr *th = tcp_hdr(skb);
        struct tcphdr *t1;
@@ -911,7 +911,7 @@ out:
 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
                            u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key, u8 tclass,
-                           u32 label)
+                           __be32 label)
 {
        tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
                             tclass, label);
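
Typing the flow label as __be32 is an endianness-annotation fix: the value is big-endian on the wire, and the annotation lets sparse flag any use that forgets to convert, the same class of bug the ping_err(skb, offset, ntohl(info)) change above corrects at a call site. The boundary-conversion idea in plain C:

    /* A big-endian wire value used as a host integer is wrong on
     * little-endian machines; convert once at the boundary. */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t wire = htonl(0x12345);     /* flow-label-sized field */

        printf("raw=0x%x decoded=0x%x\n", wire, ntohl(wire));
        return 0;
    }
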
index f421c9f23c5bef7bf58937635474713cf4722516..acc09705618b4cccd622122f58ce4ab1639f22d1 100644 (file)
@@ -115,11 +115,10 @@ static void udp_v6_rehash(struct sock *sk)
        udp_lib_rehash(sk, new_hash);
 }
 
-static inline int compute_score(struct sock *sk, struct net *net,
-                               unsigned short hnum,
-                               const struct in6_addr *saddr, __be16 sport,
-                               const struct in6_addr *daddr, __be16 dport,
-                               int dif)
+static int compute_score(struct sock *sk, struct net *net,
+                        const struct in6_addr *saddr, __be16 sport,
+                        const struct in6_addr *daddr, unsigned short hnum,
+                        int dif)
 {
        int score;
        struct inet_sock *inet;
@@ -162,54 +161,11 @@ static inline int compute_score(struct sock *sk, struct net *net,
        return score;
 }
 
-static inline int compute_score2(struct sock *sk, struct net *net,
-                                const struct in6_addr *saddr, __be16 sport,
-                                const struct in6_addr *daddr,
-                                unsigned short hnum, int dif)
-{
-       int score;
-       struct inet_sock *inet;
-
-       if (!net_eq(sock_net(sk), net) ||
-           udp_sk(sk)->udp_port_hash != hnum ||
-           sk->sk_family != PF_INET6)
-               return -1;
-
-       if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
-               return -1;
-
-       score = 0;
-       inet = inet_sk(sk);
-
-       if (inet->inet_dport) {
-               if (inet->inet_dport != sport)
-                       return -1;
-               score++;
-       }
-
-       if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
-               if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
-                       return -1;
-               score++;
-       }
-
-       if (sk->sk_bound_dev_if) {
-               if (sk->sk_bound_dev_if != dif)
-                       return -1;
-               score++;
-       }
-
-       if (sk->sk_incoming_cpu == raw_smp_processor_id())
-               score++;
-
-       return score;
-}
-
-/* called with read_rcu_lock() */
+/* called with rcu_read_lock() */
 static struct sock *udp6_lib_lookup2(struct net *net,
                const struct in6_addr *saddr, __be16 sport,
                const struct in6_addr *daddr, unsigned int hnum, int dif,
-               struct udp_hslot *hslot2, unsigned int slot2,
+               struct udp_hslot *hslot2,
                struct sk_buff *skb)
 {
        struct sock *sk, *result;
@@ -219,7 +175,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
        result = NULL;
        badness = -1;
        udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
-               score = compute_score2(sk, net, saddr, sport,
+               score = compute_score(sk, net, saddr, sport,
                                      daddr, hnum, dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
@@ -268,17 +224,22 @@ struct sock *__udp6_lib_lookup(struct net *net,
 
                result = udp6_lib_lookup2(net, saddr, sport,
                                          daddr, hnum, dif,
-                                         hslot2, slot2, skb);
+                                         hslot2, skb);
                if (!result) {
+                       unsigned int old_slot2 = slot2;
                        hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
                        slot2 = hash2 & udptable->mask;
+                       /* avoid searching the same slot again. */
+                       if (unlikely(slot2 == old_slot2))
+                               return result;
+
                        hslot2 = &udptable->hash2[slot2];
                        if (hslot->count < hslot2->count)
                                goto begin;
 
                        result = udp6_lib_lookup2(net, saddr, sport,
-                                                 &in6addr_any, hnum, dif,
-                                                 hslot2, slot2, skb);
+                                                 daddr, hnum, dif,
+                                                 hslot2, skb);
                }
                return result;
        }
@@ -286,7 +247,7 @@ begin:
        result = NULL;
        badness = -1;
        sk_for_each_rcu(sk, &hslot->head) {
-               score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
+               score = compute_score(sk, net, saddr, sport, daddr, hnum, dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
@@ -659,6 +620,8 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        if (sk_filter(sk, skb))
                goto drop;
+       if (unlikely(skb->len < sizeof(struct udphdr)))
+               goto drop;
 
        udp_csum_pull_header(skb);
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
index 738008726cc6f74710382ea1771a5a6118aa7a94..fda7f4715c58fa4f457f4410e1e3998819a1e77e 100644 (file)
@@ -241,6 +241,7 @@ static const struct file_operations kcm_seq_fops = {
        .open           = kcm_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
+       .release        = seq_release_net,
 };
 
 static struct kcm_seq_muxinfo kcm_seq_muxinfo = {
index 21b1fdf5d01d50d751ac6c3cf86670ecf98850be..6a1603bcdcedbccd99627d09d2b0367b361e1454 100644 (file)
@@ -148,14 +148,17 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
 void mesh_sta_cleanup(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       u32 changed;
+       u32 changed = 0;
 
        /*
         * maybe userspace handles peer allocation and peering, but in either
         * case the beacon is still generated by the kernel and we might need
         * an update.
         */
-       changed = mesh_accept_plinks_update(sdata);
+       if (sdata->u.mesh.user_mpm &&
+           sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+               changed |= mesh_plink_dec_estab_count(sdata);
+       changed |= mesh_accept_plinks_update(sdata);
        if (!sdata->u.mesh.user_mpm) {
                changed |= mesh_plink_deactivate(sta);
                del_timer_sync(&sta->mesh->plink_timer);
index 803001a45aa16e6b5a372ab385dba8e9c09bd2f0..1b07578bedf336c53e3b6072c8c3324f7f18081b 100644 (file)
@@ -1545,7 +1545,8 @@ error:
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
+static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+                                       int ifindex)
 {
        /* multicast addr */
        union ipvs_sockaddr mcast_addr;
@@ -1566,6 +1567,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
                set_sock_size(sock->sk, 0, result);
 
        get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+       sock->sk->sk_bound_dev_if = ifindex;
        result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
        if (result < 0) {
                pr_err("Error binding to the multicast addr\n");
@@ -1868,7 +1870,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
                if (state == IP_VS_STATE_MASTER)
                        sock = make_send_sock(ipvs, id);
                else
-                       sock = make_receive_sock(ipvs, id);
+                       sock = make_receive_sock(ipvs, id, dev->ifindex);
                if (IS_ERR(sock)) {
                        result = PTR_ERR(sock);
                        goto outtinfo;
index db2312eeb2a47c44db0f0ac5a529a10a0a8f8d2f..9f530adad10d54a9277a9fbfdee4236055e3f99a 100644 (file)
@@ -646,6 +646,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
 
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->allow_clash &&
+           !nfct_nat(ct) &&
            !nf_ct_is_dying(ct) &&
            atomic_inc_not_zero(&ct->ct_general.use)) {
                nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
@@ -1544,6 +1545,8 @@ void nf_conntrack_cleanup_end(void)
        nf_conntrack_tstamp_fini();
        nf_conntrack_acct_fini();
        nf_conntrack_expect_fini();
+
+       kmem_cache_destroy(nf_conntrack_cachep);
 }
 
 /*
@@ -1599,8 +1602,15 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
        unsigned int nr_slots, i;
        size_t sz;
 
+       if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+               return NULL;
+
        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+
+       if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+               return NULL;
+
        sz = nr_slots * sizeof(struct hlist_nulls_head);
        hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                                        get_order(sz));
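
nf_ct_alloc_hashtable() now checks the requested slot count against UINT_MAX / sizeof(struct hlist_nulls_head) both before and after the PAGE_SIZE round-up, so a huge request fails cleanly instead of overflowing the size computation and allocating a table smaller than the code believes. The guard, modeled with SIZE_MAX in userspace:

    /* Validate count against SIZE_MAX / element size before the
     * multiplication ever happens. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *alloc_table(size_t nmemb, size_t size)
    {
        if (size && nmemb > SIZE_MAX / size)    /* would overflow */
            return NULL;
        return malloc(nmemb * size);
    }

    int main(void)
    {
        void *ok = alloc_table(1024, 8);

        printf("huge=%p\n", alloc_table(SIZE_MAX, 8));  /* NULL, rejected */
        printf("ok=%d\n", ok != NULL);
        free(ok);
        return 0;
    }
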
index 7b7aa871a174fa2a1fd1e002fe0838231035e305..cf7c74599cbe5e6b800b584bee75d60f62d84a73 100644 (file)
@@ -1724,9 +1724,11 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
 
        err = nf_tables_newexpr(ctx, &info, expr);
        if (err < 0)
-               goto err2;
+               goto err3;
 
        return expr;
+err3:
+       kfree(expr);
 err2:
        module_put(info.ops->type->owner);
 err1:
@@ -2946,24 +2948,20 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
                 * jumps are already validated for that chain.
                 */
                list_for_each_entry(i, &set->bindings, list) {
-                       if (binding->flags & NFT_SET_MAP &&
+                       if (i->flags & NFT_SET_MAP &&
                            i->chain == binding->chain)
                                goto bind;
                }
 
+               iter.genmask    = nft_genmask_next(ctx->net);
                iter.skip       = 0;
                iter.count      = 0;
                iter.err        = 0;
                iter.fn         = nf_tables_bind_check_setelem;
 
                set->ops->walk(ctx, set, &iter);
-               if (iter.err < 0) {
-                       /* Destroy anonymous sets if binding fails */
-                       if (set->flags & NFT_SET_ANONYMOUS)
-                               nf_tables_set_destroy(ctx, set);
-
+               if (iter.err < 0)
                        return iter.err;
-               }
        }
 bind:
        binding->chain = ctx->chain;
@@ -3192,12 +3190,13 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
        if (nest == NULL)
                goto nla_put_failure;
 
-       args.cb         = cb;
-       args.skb        = skb;
-       args.iter.skip  = cb->args[0];
-       args.iter.count = 0;
-       args.iter.err   = 0;
-       args.iter.fn    = nf_tables_dump_setelem;
+       args.cb                 = cb;
+       args.skb                = skb;
+       args.iter.genmask       = nft_genmask_cur(ctx.net);
+       args.iter.skip          = cb->args[0];
+       args.iter.count         = 0;
+       args.iter.err           = 0;
+       args.iter.fn            = nf_tables_dump_setelem;
        set->ops->walk(&ctx, set, &args.iter);
 
        nla_nest_end(skb, nest);
@@ -4284,6 +4283,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
                            binding->chain != chain)
                                continue;
 
+                       iter.genmask    = nft_genmask_next(ctx->net);
                        iter.skip       = 0;
                        iter.count      = 0;
                        iter.err        = 0;
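
The nft_expr_init() fix adds an err3 label so the expression itself is freed when nf_tables_newexpr() fails, restoring the usual goto-unwind discipline where each label releases exactly what was acquired before the failing step; the rds_tcp_init() relabel later in this series (out_slab becoming out_pernet) fixes the same class of mis-ordered unwind. A compact illustration with hypothetical acquire/release pairs:

    /* Each label undoes only the steps completed so far, in reverse
     * order. */
    #include <stdio.h>
    #include <stdlib.h>

    static int setup(void)
    {
        int fail_step3 = 1;             /* pretend step 3 fails */
        void *a, *b;

        a = malloc(16);                 /* step 1 */
        if (!a)
            goto err1;
        b = malloc(16);                 /* step 2 */
        if (!b)
            goto err2;
        if (fail_step3)                 /* step 3 */
            goto err3;
        return 0;
    err3:
        free(b);                        /* undo step 2 */
    err2:
        free(a);                        /* undo step 1 */
    err1:
        return -1;
    }

    int main(void)
    {
        printf("setup=%d\n", setup());
        return 0;
    }
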
index e9f8dffcc244573e0fe209a9f03318f2bc53968d..fb8b5892b5ffa8ec6b0cf35792991b9125f082eb 100644 (file)
@@ -143,7 +143,7 @@ next_rule:
        list_for_each_entry_continue_rcu(rule, &chain->rules, list) {
 
                /* This rule is not active, skip. */
-               if (unlikely(rule->genmask & (1 << gencursor)))
+               if (unlikely(rule->genmask & gencursor))
                        continue;
 
                rulenum++;
index 137e308d5b24c0865336da92e97686d8fef0b9d1..81fbb450783e59932ecc1c67147fc0ca89b475d3 100644 (file)
@@ -54,7 +54,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
        const struct nf_conn_help *help;
        const struct nf_conntrack_tuple *tuple;
        const struct nf_conntrack_helper *helper;
-       long diff;
        unsigned int state;
 
        ct = nf_ct_get(pkt->skb, &ctinfo);
@@ -94,10 +93,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                return;
 #endif
        case NFT_CT_EXPIRATION:
-               diff = (long)jiffies - (long)ct->timeout.expires;
-               if (diff < 0)
-                       diff = 0;
-               *dest = jiffies_to_msecs(diff);
+               *dest = jiffies_to_msecs(nf_ct_expires(ct));
                return;
        case NFT_CT_HELPER:
                if (ct->master == NULL)
index 6fa016564f90cbf821a4ae45ac580b2e4e7c43ec..f39c53a159eba6cbe9586599b04147e8247d9523 100644 (file)
@@ -189,7 +189,6 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
        struct nft_hash_elem *he;
        struct rhashtable_iter hti;
        struct nft_set_elem elem;
-       u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
        int err;
 
        err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
@@ -218,7 +217,7 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
                        goto cont;
                if (nft_set_elem_expired(&he->ext))
                        goto cont;
-               if (!nft_set_elem_active(&he->ext, genmask))
+               if (!nft_set_elem_active(&he->ext, iter->genmask))
                        goto cont;
 
                elem.priv = he;
index 16c50b0dd426840f79bda0bb3ebbd28ecb2845e5..f4bad9dc15c48b0d8635632dc50179457b5c49c1 100644 (file)
@@ -227,7 +227,7 @@ void nft_meta_set_eval(const struct nft_expr *expr,
                        skb->pkt_type = value;
                break;
        case NFT_META_NFTRACE:
-               skb->nf_trace = 1;
+               skb->nf_trace = !!value;
                break;
        default:
                WARN_ON(1);
index f762094af7c1ca7e7684448381e7e49ee430045e..7201d57b5a932ebbb8d7fe72c691d5e69ff99784 100644 (file)
@@ -211,7 +211,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
        struct nft_rbtree_elem *rbe;
        struct nft_set_elem elem;
        struct rb_node *node;
-       u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
 
        spin_lock_bh(&nft_rbtree_lock);
        for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
@@ -219,7 +218,7 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 
                if (iter->count < iter->skip)
                        goto cont;
-               if (!nft_set_elem_active(&rbe->ext, genmask))
+               if (!nft_set_elem_active(&rbe->ext, iter->genmask))
                        goto cont;
 
                elem.priv = rbe;
index 3d5feede962dc584408e9b5a79e4b723f5308319..d84312584ee46a5fa82040c1802b98897f4b5aef 100644 (file)
@@ -818,8 +818,18 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                 */
                state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
                __ovs_ct_update_key(key, state, &info->zone, exp->master);
-       } else
-               return __ovs_ct_lookup(net, key, info, skb);
+       } else {
+               struct nf_conn *ct;
+               int err;
+
+               err = __ovs_ct_lookup(net, key, info, skb);
+               if (err)
+                       return err;
+
+               ct = (struct nf_conn *)skb->nfct;
+               if (ct)
+                       nf_ct_deliver_cached_events(ct);
+       }
 
        return 0;
 }
index 9bff6ef16fa7632fcfc05f23dd696d75ead6d5e8..b43c4015b2f79678bb5a2edffdd4e57cf9bdb880 100644 (file)
@@ -1341,7 +1341,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
                                      struct sk_buff *skb,
                                      unsigned int num)
 {
-       return reciprocal_scale(skb_get_hash(skb), num);
+       return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
 }
 
 static unsigned int fanout_demux_lb(struct packet_fanout *f,
@@ -1927,13 +1927,11 @@ retry:
                goto out_unlock;
        }
 
-       sockc.tsflags = 0;
+       sockc.tsflags = sk->sk_tsflags;
        if (msg->msg_controllen) {
                err = sock_cmsg_send(sk, msg, &sockc);
-               if (unlikely(err)) {
-                       err = -EINVAL;
+               if (unlikely(err))
                        goto out_unlock;
-               }
        }
 
        skb->protocol = proto;
@@ -2678,7 +2676,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
        }
 
-       sockc.tsflags = 0;
+       sockc.tsflags = po->sk.sk_tsflags;
        if (msg->msg_controllen) {
                err = sock_cmsg_send(&po->sk, msg, &sockc);
                if (unlikely(err))
@@ -2881,7 +2879,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        if (unlikely(!(dev->flags & IFF_UP)))
                goto out_unlock;
 
-       sockc.tsflags = 0;
+       sockc.tsflags = sk->sk_tsflags;
        sockc.mark = sk->sk_mark;
        if (msg->msg_controllen) {
                err = sock_cmsg_send(sk, msg, &sockc);
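
Switching fanout_demux_hash() to __skb_get_hash_symmetric() makes both directions of a flow hash to the same fanout socket, which matters for taps that reassemble conversations; the tsflags hunks likewise make PF_PACKET senders inherit the socket's timestamping flags instead of silently zeroing them. A toy symmetric hash using canonical endpoint ordering, not the kernel flow dissector:

    /* Sort the two endpoints before hashing so (A -> B) and (B -> A)
     * produce the same value. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mix(uint32_t h, uint32_t v)
    {
        h ^= v;
        h *= 0x9e3779b1u;
        return h;
    }

    static uint32_t sym_hash(uint32_t a, uint32_t b)
    {
        if (a > b) {                    /* canonical endpoint order */
            uint32_t t = a; a = b; b = t;
        }
        return mix(mix(0x811c9dc5u, a), b);
    }

    int main(void)
    {
        printf("%u == %u\n", sym_hash(0x0a000001, 0x0a000002),
               sym_hash(0x0a000002, 0x0a000001));
        return 0;
    }
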
index 310cabce23111cfaa45af97adef28f4d41d1bbda..7c2a65a6af5c6ab96eeef220c854ba01e846e35c 100644 (file)
@@ -111,7 +111,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
                }
        }
 
-       if (conn->c_version < RDS_PROTOCOL(3,1)) {
+       if (conn->c_version < RDS_PROTOCOL(3, 1)) {
                printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
                       " no longer supported\n",
                       &conn->c_faddr,
index 6b12b68541ae96fb8be76e72cb8d0e6f8c89abee..814173b466d9a80b21da91c5e38055221e9abcf8 100644 (file)
@@ -95,8 +95,9 @@ out:
  */
 static void rds_loop_inc_free(struct rds_incoming *inc)
 {
-        struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
-        rds_message_put(rm);
+       struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
+
+       rds_message_put(rm);
 }
 
 /* we need to at least give the thread something to succeed */
index c173f69e1479bfaf643b9e5c69f4c9a151b18c67..e381bbcd9cc1c37d93eb19647b2a2262d8d2bae9 100644 (file)
@@ -102,7 +102,8 @@ int rds_sysctl_init(void)
        rds_sysctl_reconnect_min = msecs_to_jiffies(1);
        rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
 
-       rds_sysctl_reg_table = register_net_sysctl(&init_net,"net/rds", rds_sysctl_rds_table);
+       rds_sysctl_reg_table =
+               register_net_sysctl(&init_net, "net/rds", rds_sysctl_rds_table);
        if (!rds_sysctl_reg_table)
                return -ENOMEM;
        return 0;
index 74ee126a6fe6c00ce8bce45053ec2b8805fc222e..c8a7b4c90190cafe8dc6fefe3e0a1935c558c357 100644 (file)
@@ -616,7 +616,7 @@ static int rds_tcp_init(void)
 
        ret = rds_tcp_recv_init();
        if (ret)
-               goto out_slab;
+               goto out_pernet;
 
        ret = rds_trans_register(&rds_tcp_transport);
        if (ret)
@@ -628,8 +628,9 @@ static int rds_tcp_init(void)
 
 out_recv:
        rds_tcp_recv_exit();
-out_slab:
+out_pernet:
        unregister_pernet_subsys(&rds_tcp_net_ops);
+out_slab:
        kmem_cache_destroy(rds_tcp_conn_slab);
 out:
        return ret;
index ec0602b0dc24b443c40437d5697037082d270f44..7940babf6c718617c729247ac348b2b6ed68a53a 100644 (file)
@@ -83,7 +83,7 @@ int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
 void rds_tcp_xmit_prepare(struct rds_connection *conn);
 void rds_tcp_xmit_complete(struct rds_connection *conn);
 int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
-                unsigned int hdr_off, unsigned int sg, unsigned int off);
+                unsigned int hdr_off, unsigned int sg, unsigned int off);
 void rds_tcp_write_space(struct sock *sk);
 
 /* tcp_stats.c */
index fba13d0305fb25f12c476e6014a2a9d4c5559075..f6e95d60db54b488e9e67375bf96b6cbda82d788 100644 (file)
@@ -54,19 +54,19 @@ void rds_tcp_state_change(struct sock *sk)
 
        rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
 
-       switch(sk->sk_state) {
-               /* ignore connecting sockets as they make progress */
-               case TCP_SYN_SENT:
-               case TCP_SYN_RECV:
-                       break;
-               case TCP_ESTABLISHED:
-                       rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
-                       break;
-               case TCP_CLOSE_WAIT:
-               case TCP_CLOSE:
-                       rds_conn_drop(conn);
-               default:
-                       break;
+       switch (sk->sk_state) {
+       /* ignore connecting sockets as they make progress */
+       case TCP_SYN_SENT:
+       case TCP_SYN_RECV:
+               break;
+       case TCP_ESTABLISHED:
+               rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+               break;
+       case TCP_CLOSE_WAIT:
+       case TCP_CLOSE:
+               rds_conn_drop(conn);
+       default:
+               break;
        }
 out:
        read_unlock_bh(&sk->sk_callback_lock);
index 686b1d03a55858aeb59d9863339c2c8e99e03d90..245542ca4718dd69f6f3e87903136d8307e4f055 100644 (file)
@@ -138,7 +138,7 @@ int rds_tcp_accept_one(struct socket *sock)
                        rds_tcp_reset_callbacks(new_sock, conn);
                        conn->c_outgoing = 0;
                        /* rds_connect_path_complete() marks RDS_CONN_UP */
-                       rds_connect_path_complete(conn, RDS_CONN_DISCONNECTING);
+                       rds_connect_path_complete(conn, RDS_CONN_RESETTING);
                }
        } else {
                rds_tcp_set_callbacks(new_sock, conn);
index c3196f9d070aaf9429947969120662f974ae44a7..6e6a7111a03406ae9a430c3c5efe6f56fe08a8da 100644 (file)
@@ -171,7 +171,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
        while (left) {
                if (!tinc) {
                        tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
-                                               arg->gfp);
+                                               arg->gfp);
                        if (!tinc) {
                                desc->error = -ENOMEM;
                                goto out;
index 22d0f2020a79f834d3a2a6536253e16c690baa41..618be69c9c3b7668c040584b1c4b40a8058f5e38 100644 (file)
@@ -66,19 +66,19 @@ void rds_tcp_xmit_complete(struct rds_connection *conn)
 static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
 {
        struct kvec vec = {
-                .iov_base = data,
-                .iov_len = len,
+               .iov_base = data,
+               .iov_len = len,
+       };
+       struct msghdr msg = {
+               .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
        };
-        struct msghdr msg = {
-                .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
-        };
 
        return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
 }
 
 /* the core send_sem serializes this with other xmit and shutdown */
 int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
-                unsigned int hdr_off, unsigned int sg, unsigned int off)
+                unsigned int hdr_off, unsigned int sg, unsigned int off)
 {
        struct rds_tcp_connection *tc = conn->c_transport_data;
        int done = 0;
@@ -196,7 +196,7 @@ void rds_tcp_write_space(struct sock *sk)
        tc->t_last_seen_una = rds_tcp_snd_una(tc);
        rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
-        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+       if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
index f3afd1d60d3c7a7880993554c6375754fb4c6ff8..2ffd3e30c6434e62333ac4eefdc1b8fea50dfbe1 100644 (file)
@@ -140,8 +140,7 @@ unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
        rds_info_iter_unmap(iter);
        down_read(&rds_trans_sem);
 
-       for (i = 0; i < RDS_TRANS_COUNT; i++)
-       {
+       for (i = 0; i < RDS_TRANS_COUNT; i++) {
                trans = transports[i];
                if (!trans || !trans->stats_info_copy)
                        continue;
index 79c4abcfa6b4ee86fbd92358f5585f88b74319f5..0a6394754e81db5469906b92f4b40046444bdb42 100644 (file)
@@ -164,7 +164,8 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
                rose_frames_acked(sk, nr);
                if (ns == rose->vr) {
                        rose_start_idletimer(sk);
-                       if (sock_queue_rcv_skb(sk, skb) == 0) {
+                       if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
+                           __sock_queue_rcv_skb(sk, skb) == 0) {
                                rose->vr = (rose->vr + 1) % ROSE_MODULUS;
                                queued = 1;
                        } else {
index 336774a535c3959f4f25b05d1732c014d0d1763c..c7a0b0d481c08ab7533d273e57c1897c2760d1d0 100644 (file)
@@ -1118,7 +1118,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
                nla_nest_end(skb, nest);
                ret = skb->len;
        } else
-               nla_nest_cancel(skb, nest);
+               nlmsg_trim(skb, b);
 
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        if (NETLINK_CB(cb->skb).portid && ret)
index 658046dfe02d7210501ee9f0324158b50f6e7858..ea4a2fef1b71c506a89ca668c115db367aaa79a6 100644 (file)
@@ -106,9 +106,9 @@ int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
 }
 EXPORT_SYMBOL_GPL(ife_get_meta_u16);
 
-int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
 {
-       mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL);
+       mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
        if (!mi->metaval)
                return -ENOMEM;
 
@@ -116,9 +116,9 @@ int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
 }
 EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
 
-int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
 {
-       mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL);
+       mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
        if (!mi->metaval)
                return -ENOMEM;
 
@@ -240,10 +240,10 @@ static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
 }
 
 /* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for existing action
 */
 static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
-                               void *val, int len)
+                               void *val, int len, bool exists)
 {
        struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret = 0;
@@ -251,11 +251,13 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
        if (!ops) {
                ret = -ENOENT;
 #ifdef CONFIG_MODULES
-               spin_unlock_bh(&ife->tcf_lock);
+               if (exists)
+                       spin_unlock_bh(&ife->tcf_lock);
                rtnl_unlock();
                request_module("ifemeta%u", metaid);
                rtnl_lock();
-               spin_lock_bh(&ife->tcf_lock);
+               if (exists)
+                       spin_lock_bh(&ife->tcf_lock);
                ops = find_ife_oplist(metaid);
 #endif
        }
@@ -272,10 +274,10 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
 }
 
 /* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for existing action
 */
 static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
-                       int len)
+                       int len, bool atomic)
 {
        struct tcf_meta_info *mi = NULL;
        struct tcf_meta_ops *ops = find_ife_oplist(metaid);
@@ -284,7 +286,7 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
        if (!ops)
                return -ENOENT;
 
-       mi = kzalloc(sizeof(*mi), GFP_KERNEL);
+       mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
        if (!mi) {
                /*put back what find_ife_oplist took */
                module_put(ops->owner);
@@ -294,7 +296,7 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
        mi->metaid = metaid;
        mi->ops = ops;
        if (len > 0) {
-               ret = ops->alloc(mi, metaval);
+               ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
                if (ret != 0) {
                        kfree(mi);
                        module_put(ops->owner);
@@ -313,11 +315,13 @@ static int use_all_metadata(struct tcf_ife_info *ife)
        int rc = 0;
        int installed = 0;
 
+       read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
-               rc = add_metainfo(ife, o->metaid, NULL, 0);
+               rc = add_metainfo(ife, o->metaid, NULL, 0, true);
                if (rc == 0)
                        installed += 1;
        }
+       read_unlock(&ife_mod_lock);
 
        if (installed)
                return 0;
@@ -385,8 +389,9 @@ static void tcf_ife_cleanup(struct tc_action *a, int bind)
        spin_unlock_bh(&ife->tcf_lock);
 }
 
-/* under ife->tcf_lock */
-static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
+/* under ife->tcf_lock for existing action */
+static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+                            bool exists)
 {
        int len = 0;
        int rc = 0;
@@ -398,11 +403,11 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
                        val = nla_data(tb[i]);
                        len = nla_len(tb[i]);
 
-                       rc = load_metaops_and_vet(ife, i, val, len);
+                       rc = load_metaops_and_vet(ife, i, val, len, exists);
                        if (rc != 0)
                                return rc;
 
-                       rc = add_metainfo(ife, i, val, len);
+                       rc = add_metainfo(ife, i, val, len, exists);
                        if (rc)
                                return rc;
                }
@@ -474,7 +479,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        saddr = nla_data(tb[TCA_IFE_SMAC]);
        }
 
-       spin_lock_bh(&ife->tcf_lock);
+       if (exists)
+               spin_lock_bh(&ife->tcf_lock);
        ife->tcf_action = parm->action;
 
        if (parm->flags & IFE_ENCODE) {
@@ -504,11 +510,12 @@ metadata_parse_err:
                        if (ret == ACT_P_CREATED)
                                _tcf_ife_cleanup(a, bind);
 
-                       spin_unlock_bh(&ife->tcf_lock);
+                       if (exists)
+                               spin_unlock_bh(&ife->tcf_lock);
                        return err;
                }
 
-               err = populate_metalist(ife, tb2);
+               err = populate_metalist(ife, tb2, exists);
                if (err)
                        goto metadata_parse_err;
 
@@ -523,12 +530,14 @@ metadata_parse_err:
                        if (ret == ACT_P_CREATED)
                                _tcf_ife_cleanup(a, bind);
 
-                       spin_unlock_bh(&ife->tcf_lock);
+                       if (exists)
+                               spin_unlock_bh(&ife->tcf_lock);
                        return err;
                }
        }
 
-       spin_unlock_bh(&ife->tcf_lock);
+       if (exists)
+               spin_unlock_bh(&ife->tcf_lock);
 
        if (ret == ACT_P_CREATED)
                tcf_hash_insert(tn, a);
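
The act_ife hunks above thread an "exists" flag through the helpers so that
ife->tcf_lock is only taken, and GFP_ATOMIC only used, when an already
published action is being modified; a freshly created action is not yet
visible to readers and can be set up locklessly with GFP_KERNEL. A minimal
sketch of the pattern, with hypothetical names (update_action, my_meta):

    static int update_action(struct my_action *a, bool exists)
    {
            struct my_meta *m;

            if (exists)             /* only published objects need the lock */
                    spin_lock_bh(&a->lock);

            /* GFP_KERNEL may sleep, which is not allowed under a spinlock,
             * so the allocation context follows the locking decision.
             */
            m = kzalloc(sizeof(*m), exists ? GFP_ATOMIC : GFP_KERNEL);
            if (m)
                    list_add(&m->list, &a->metalist);

            if (exists)
                    spin_unlock_bh(&a->lock);

            return m ? 0 : -ENOMEM;
    }
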
index 9f002ada7074ccdb648433b443870562962c8694..d4bd19ee5822d4849cbf6c473d3245fb2cbff094 100644 (file)
@@ -121,10 +121,13 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
        }
 
        td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
-       if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
+       if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+               if (exists)
+                       tcf_hash_release(a, bind);
                return -EINVAL;
+       }
 
-       if (!tcf_hash_check(tn, index, a, bind)) {
+       if (!exists) {
                ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
                                      false);
                if (ret)
index 128942bc9e42e82ed11dad14448b69cd6858e541..1f5bd6ccbd2c6162f328aa209e2140bad99f1e3f 100644 (file)
@@ -181,7 +181,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 
        if (!(at & AT_EGRESS)) {
                if (m->tcfm_ok_push)
-                       skb_push(skb2, skb->mac_len);
+                       skb_push_rcsum(skb2, skb->mac_len);
        }
 
        /* mirror is always swallowed */
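
skb_push_rcsum() matters here because the mirrored clone may carry
CHECKSUM_COMPLETE: pushing the MAC header back with a plain skb_push() would
leave skb->csum stale, since that checksum covers exactly the bytes between
skb->data and the tail. The helper is roughly the following (paraphrased
from include/linux/skbuff.h of this era, not a new API):

    static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
                                                unsigned int len)
    {
            skb_push(skb, len);                       /* expose the header */
            skb_postpush_rcsum(skb, skb->data, len);  /* fold it into csum */
            return skb->data;
    }
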
index 2177eac0a61ed00c6c60655f577e0dd816fd2c08..2e4bd2c0a50c497fbc6cbfe5338cab981559b491 100644 (file)
@@ -37,14 +37,18 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
+       unsigned int prev_backlog;
+
        if (likely(skb_queue_len(&sch->q) < sch->limit))
                return qdisc_enqueue_tail(skb, sch);
 
+       prev_backlog = sch->qstats.backlog;
        /* queue full, remove one skb to fulfill the limit */
        __qdisc_queue_drop_head(sch, &sch->q);
        qdisc_qstats_drop(sch);
        qdisc_enqueue_tail(skb, sch);
 
+       qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
        return NET_XMIT_CN;
 }
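
The pfifo_tail fix illustrates hierarchical backlog accounting: parent
qdiscs cache the queue length and byte counts of their children, so when a
tail-drop replaces a (possibly large) head packet with a (possibly smaller)
new one, the byte delta must be propagated upwards. The same code with the
accounting spelled out:

    unsigned int prev_backlog = sch->qstats.backlog;

    __qdisc_queue_drop_head(sch, &sch->q);  /* removes the head's bytes */
    qdisc_enqueue_tail(skb, sch);           /* adds the new packet's bytes */

    /* qlen is unchanged (one out, one in), but the byte count moved by
     * prev_backlog - sch->qstats.backlog; tell the ancestors about it.
     */
    qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);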
 
index d4b4218af6b1b8dc9824408a3b4f62986cae3f18..052f84d6cc236176b64adbd2c2c3c7f38b18cb43 100644 (file)
@@ -1007,7 +1007,9 @@ static void htb_work_func(struct work_struct *work)
        struct htb_sched *q = container_of(work, struct htb_sched, work);
        struct Qdisc *sch = q->watchdog.qdisc;
 
+       rcu_read_lock();
        __netif_schedule(qdisc_root(sch));
+       rcu_read_unlock();
 }
 
 static int htb_init(struct Qdisc *sch, struct nlattr *opt)
@@ -1138,8 +1140,10 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 
        if (!cl->level && cl->un.leaf.q)
                qlen = cl->un.leaf.q->q.qlen;
-       cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
-       cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
+       cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
+                                   INT_MIN, INT_MAX);
+       cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
+                                    INT_MIN, INT_MAX);
 
        if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
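
Two independent fixes sit in the htb hunks: __netif_schedule() is now called
under rcu_read_lock(), since qdisc_root() is an RCU dereference and the work
function holds neither RCU nor RTNL, and the token statistics are clamped
because cl->tokens is a 64-bit nanosecond count while the exported xstats
fields are only 32 bits wide. A self-contained illustration of why the clamp
beats silent truncation (a userspace analog, not kernel code):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    static int32_t clamp_s64(int64_t v)
    {
            if (v < INT_MIN)
                    return INT_MIN;
            if (v > INT_MAX)
                    return INT_MAX;
            return (int32_t)v;
    }

    int main(void)
    {
            int64_t ticks = 5000000000LL;  /* larger than INT_MAX */

            printf("truncated: %d\n", (int32_t)ticks);   /* 705032704, nonsense */
            printf("clamped:   %d\n", clamp_s64(ticks)); /* 2147483647, saturated */
            return 0;
    }
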
index 205bed00dd3463c62696ecc61eb78f2c97b3d0c9..178f1630a036414629aed50256cf6a8c3a105984 100644 (file)
@@ -650,14 +650,14 @@ deliver:
 #endif
 
                        if (q->qdisc) {
+                               unsigned int pkt_len = qdisc_pkt_len(skb);
                                int err = qdisc_enqueue(skb, q->qdisc);
 
-                               if (unlikely(err != NET_XMIT_SUCCESS)) {
-                                       if (net_xmit_drop_count(err)) {
-                                               qdisc_qstats_drop(sch);
-                                               qdisc_tree_reduce_backlog(sch, 1,
-                                                                         qdisc_pkt_len(skb));
-                                       }
+                               if (err != NET_XMIT_SUCCESS &&
+                                   net_xmit_drop_count(err)) {
+                                       qdisc_qstats_drop(sch);
+                                       qdisc_tree_reduce_backlog(sch, 1,
+                                                                 pkt_len);
                                }
                                goto tfifo_dequeue;
                        }
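
The netem change avoids a use-after-free: qdisc_enqueue() may drop and free
the skb when the child queue is full, so the packet length has to be sampled
before the call rather than after. The general pattern:

    unsigned int pkt_len = qdisc_pkt_len(skb);  /* read while skb is ours */
    int err = qdisc_enqueue(skb, q->qdisc);     /* may consume and free skb */

    /* use the saved length: qdisc_pkt_len(skb) would touch freed memory */
    if (err != NET_XMIT_SUCCESS && net_xmit_drop_count(err))
            qdisc_tree_reduce_backlog(sch, 1, pkt_len);
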
index 4b0a82191bc4011c9574a029fe8c30133c5f6f50..a356450b747ba57552d6c1e1e35255935580f02a 100644 (file)
@@ -172,8 +172,9 @@ prio_destroy(struct Qdisc *sch)
 static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
+       struct Qdisc *queues[TCQ_PRIO_BANDS];
+       int oldbands = q->bands, i;
        struct tc_prio_qopt *qopt;
-       int i;
 
        if (nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
@@ -187,62 +188,42 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
                        return -EINVAL;
        }
 
+       /* Before commit, make sure we can allocate all new qdiscs */
+       for (i = oldbands; i < qopt->bands; i++) {
+               queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                             TC_H_MAKE(sch->handle, i + 1));
+               if (!queues[i]) {
+                       while (i > oldbands)
+                               qdisc_destroy(queues[--i]);
+                       return -ENOMEM;
+               }
+       }
+
        sch_tree_lock(sch);
        q->bands = qopt->bands;
        memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-       for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
+       for (i = q->bands; i < oldbands; i++) {
                struct Qdisc *child = q->queues[i];
-               q->queues[i] = &noop_qdisc;
-               if (child != &noop_qdisc) {
-                       qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
-                       qdisc_destroy(child);
-               }
-       }
-       sch_tree_unlock(sch);
 
-       for (i = 0; i < q->bands; i++) {
-               if (q->queues[i] == &noop_qdisc) {
-                       struct Qdisc *child, *old;
-
-                       child = qdisc_create_dflt(sch->dev_queue,
-                                                 &pfifo_qdisc_ops,
-                                                 TC_H_MAKE(sch->handle, i + 1));
-                       if (child) {
-                               sch_tree_lock(sch);
-                               old = q->queues[i];
-                               q->queues[i] = child;
-
-                               if (old != &noop_qdisc) {
-                                       qdisc_tree_reduce_backlog(old,
-                                                                 old->q.qlen,
-                                                                 old->qstats.backlog);
-                                       qdisc_destroy(old);
-                               }
-                               sch_tree_unlock(sch);
-                       }
-               }
+               qdisc_tree_reduce_backlog(child, child->q.qlen,
+                                         child->qstats.backlog);
+               qdisc_destroy(child);
        }
+
+       for (i = oldbands; i < q->bands; i++)
+               q->queues[i] = queues[i];
+
+       sch_tree_unlock(sch);
        return 0;
 }
 
 static int prio_init(struct Qdisc *sch, struct nlattr *opt)
 {
-       struct prio_sched_data *q = qdisc_priv(sch);
-       int i;
-
-       for (i = 0; i < TCQ_PRIO_BANDS; i++)
-               q->queues[i] = &noop_qdisc;
-
-       if (opt == NULL) {
+       if (!opt)
                return -EINVAL;
-       } else {
-               int err;
 
-               if ((err = prio_tune(sch, opt)) != 0)
-                       return err;
-       }
-       return 0;
+       return prio_tune(sch, opt);
 }
 
 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
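
prio_tune() now follows the allocate-before-commit idiom: every qdisc that
might be needed is created outside sch_tree_lock(), and the locked section
only swaps pointers and frees, operations that cannot fail. A condensed
sketch of the idiom with hypothetical helpers (alloc_child, free_child):

    struct child *tmp[MAX_BANDS];
    int i;

    for (i = oldbands; i < newbands; i++) {
            tmp[i] = alloc_child(i);            /* may sleep, may fail */
            if (!tmp[i]) {
                    while (i > oldbands)
                            free_child(tmp[--i]);   /* roll back cleanly */
                    return -ENOMEM;
            }
    }

    lock();                                     /* commit phase: no failures */
    for (i = newbands; i < oldbands; i++)       /* shrinking: drop the excess */
            free_child(old[i]);
    for (i = oldbands; i < newbands; i++)       /* growing: install the new */
            old[i] = tmp[i];
    unlock();
    return 0;
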
index a701527a9480faff1b8d91257e1dbf3c0f09ed68..47cf4604d19c23a4bf392c87d06de3e44680d5c0 100644 (file)
@@ -112,7 +112,6 @@ int sctp_rcv(struct sk_buff *skb)
        struct sctp_ep_common *rcvr;
        struct sctp_transport *transport = NULL;
        struct sctp_chunk *chunk;
-       struct sctphdr *sh;
        union sctp_addr src;
        union sctp_addr dest;
        int family;
@@ -127,8 +126,6 @@ int sctp_rcv(struct sk_buff *skb)
        if (skb_linearize(skb))
                goto discard_it;
 
-       sh = sctp_hdr(skb);
-
        /* Pull up the IP and SCTP headers. */
        __skb_pull(skb, skb_transport_offset(skb));
        if (skb->len < sizeof(struct sctphdr))
@@ -230,7 +227,7 @@ int sctp_rcv(struct sk_buff *skb)
        chunk->rcvr = rcvr;
 
        /* Remember the SCTP header. */
-       chunk->sctp_hdr = sh;
+       chunk->sctp_hdr = sctp_hdr(skb);
 
        /* Set the source and destination addresses of the incoming chunk.  */
        sctp_init_addrs(chunk, &src, &dest);
index 1ce724b87618a08c147b768a33c2fcef7f9ff6cb..f69edcf219e514d864c4af2d57eb0429e8f4938a 100644 (file)
@@ -3,12 +3,6 @@
 #include <linux/sock_diag.h>
 #include <net/sctp/sctp.h>
 
-extern void inet_diag_msg_common_fill(struct inet_diag_msg *r,
-                                     struct sock *sk);
-extern int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
-                                   struct inet_diag_msg *r, int ext,
-                                   struct user_namespace *user_ns);
-
 static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                               void *info);
 
index 6f11c62bc8f9cbae5d9d69da5127e9f92e000f2f..a597708ae3818b25f9eaee8f1c3584865432a37d 100644 (file)
@@ -330,6 +330,21 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
        return 0;
 }
 
+/* tipc_bearer_reset_all - reset all links on all bearers
+ */
+void tipc_bearer_reset_all(struct net *net)
+{
+       struct tipc_net *tn = tipc_net(net);
+       struct tipc_bearer *b;
+       int i;
+
+       for (i = 0; i < MAX_BEARERS; i++) {
+               b = rcu_dereference_rtnl(tn->bearer_list[i]);
+               if (b)
+                       tipc_reset_bearer(net, b);
+       }
+}
+
 /**
  * bearer_disable
  *
@@ -405,7 +420,7 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
                return 0;
 
        /* Send RESET message even if bearer is detached from device */
-       tipc_ptr = rtnl_dereference(dev->tipc_ptr);
+       tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
        if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
                goto drop;
 
index f686e41b5abb880cc09f9c1a707356047d32749f..60e49c3be19c1e44b44175926b10c6aecb64fa58 100644 (file)
@@ -198,6 +198,7 @@ void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest);
 void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
 struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
 struct tipc_media *tipc_media_find(const char *name);
+void tipc_bearer_reset_all(struct net *net);
 int tipc_bearer_setup(void);
 void tipc_bearer_cleanup(void);
 void tipc_bearer_stop(struct net *net);
index 7059c94f33c55f03ce70e193a017cbf2bb441f3e..7d89f8713d4984f3a9d732c9cabc56d410219ad8 100644 (file)
@@ -349,6 +349,8 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
        u16 ack = snd_l->snd_nxt - 1;
 
        snd_l->ackers--;
+       rcv_l->bc_peer_is_up = true;
+       rcv_l->state = LINK_ESTABLISHED;
        tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
        tipc_link_reset(rcv_l);
        rcv_l->state = LINK_RESET;
@@ -704,7 +706,8 @@ static void link_profile_stats(struct tipc_link *l)
  */
 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
-       int mtyp, rc = 0;
+       int mtyp = 0;
+       int rc = 0;
        bool state = false;
        bool probe = false;
        bool setup = false;
@@ -1558,7 +1561,12 @@ void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
        if (!msg_peer_node_is_up(hdr))
                return;
 
-       l->bc_peer_is_up = true;
+       /* Open when peer acknowledges our bcast init msg (pkt #1) */
+       if (msg_ack(hdr))
+               l->bc_peer_is_up = true;
+
+       if (!l->bc_peer_is_up)
+               return;
 
        /* Ignore if peers_snd_nxt goes beyond receive window */
        if (more(peers_snd_nxt, l->rcv_nxt + l->window))
index 8740930f07872ff1ec8ff9d6b712b2772ba48336..17201aa8423ddd816a4792621e289dbeee8d4928 100644 (file)
@@ -41,6 +41,8 @@
 #include "name_table.h"
 
 #define MAX_FORWARD_SIZE 1024
+#define BUF_HEADROOM (LL_MAX_HEADER + 48)
+#define BUF_TAILROOM 16
 
 static unsigned int align(unsigned int i)
 {
@@ -505,6 +507,10 @@ bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
                msg_set_hdr_sz(hdr, BASIC_H_SIZE);
        }
 
+       if (skb_cloned(_skb) &&
+           pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
+               goto exit;
+
        /* Now reverse the concerned fields */
        msg_set_errcode(hdr, err);
        msg_set_origport(hdr, msg_destport(&ohdr));
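
tipc_msg_reverse() is about to rewrite header fields in place, which is only
safe when no clone shares the data area; skb_cloned() followed by
pskb_expand_head() is the stock unclone step. Sketch of the pattern (the
headroom/tailroom values are whatever the caller needs):

    if (skb_cloned(skb) &&
        pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
            goto exit;              /* could not get a private copy */

    /* data area is now exclusively ours, header writes are safe */
    msg_set_errcode(buf_msg(skb), err);
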
index 024da8af91f0edf55e78c30edc682fe5741c6198..7cf52fb39bee6aa38379e81af31fe3d5c6fea557 100644 (file)
@@ -94,17 +94,6 @@ struct plist;
 
 #define TIPC_MEDIA_INFO_OFFSET 5
 
-/**
- * TIPC message buffer code
- *
- * TIPC message buffer headroom reserves space for the worst-case
- * link-level device header (in case the message is sent off-node).
- *
- * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
- *       are word aligned for quicker access
- */
-#define BUF_HEADROOM (LL_MAX_HEADER + 48)
-
 struct tipc_skb_cb {
        void *handle;
        struct sk_buff *tail;
index 3ad9fab1985f1cdca35c876f0f228d28b9b5db00..1fd4647647650b75f17f41d19d288be7abe436a3 100644 (file)
@@ -604,7 +604,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
 
        link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
        link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
-       nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]),
+       nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
                    TIPC_MAX_LINK_NAME);
 
        return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
index e01e2c71b5a16fde975b6e87bd134ae57cd57326..23d4761842a0ed9ac62eb1af5cc03259aadeb22d 100644 (file)
@@ -1297,10 +1297,6 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
 
        rc = tipc_bcast_rcv(net, be->link, skb);
 
-       /* Broadcast link reset may happen at reassembly failure */
-       if (rc & TIPC_LINK_DOWN_EVT)
-               tipc_node_reset_links(n);
-
        /* Broadcast ACKs are sent on a unicast link */
        if (rc & TIPC_LINK_SND_BC_ACK) {
                tipc_node_read_lock(n);
@@ -1320,6 +1316,17 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
                spin_unlock_bh(&be->inputq2.lock);
                tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
        }
+
+       if (rc & TIPC_LINK_DOWN_EVT) {
+               /* Reception reassembly failure => reset all links to peer */
+               if (!tipc_link_is_up(be->link))
+                       tipc_node_reset_links(n);
+
+               /* Retransmission failure => reset all links to all peers */
+               if (!tipc_link_is_up(tipc_bc_sndlink(net)))
+                       tipc_bearer_reset_all(net);
+       }
+
        tipc_node_put(n);
 }
 
index 88bfcd707064145cac007eb7ff88fb1a2eb77418..c49b8df438cbeee021bbedf3c96631d82fb16670 100644 (file)
@@ -796,9 +796,11 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
  * @tsk: receiving socket
  * @skb: pointer to message buffer.
  */
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+                             struct sk_buff_head *xmitq)
 {
        struct sock *sk = &tsk->sk;
+       u32 onode = tsk_own_node(tsk);
        struct tipc_msg *hdr = buf_msg(skb);
        int mtyp = msg_type(hdr);
        bool conn_cong;
@@ -811,7 +813,8 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
 
        if (mtyp == CONN_PROBE) {
                msg_set_type(hdr, CONN_PROBE_REPLY);
-               tipc_sk_respond(sk, skb, TIPC_OK);
+               if (tipc_msg_reverse(onode, &skb, TIPC_OK))
+                       __skb_queue_tail(xmitq, skb);
                return;
        } else if (mtyp == CONN_ACK) {
                conn_cong = tsk_conn_cong(tsk);
@@ -1686,7 +1689,8 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
  *
  * Returns true if message was added to socket receive queue, otherwise false
  */
-static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
+                      struct sk_buff_head *xmitq)
 {
        struct socket *sock = sk->sk_socket;
        struct tipc_sock *tsk = tipc_sk(sk);
@@ -1696,7 +1700,7 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
        int usr = msg_user(hdr);
 
        if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
-               tipc_sk_proto_rcv(tsk, skb);
+               tipc_sk_proto_rcv(tsk, skb, xmitq);
                return false;
        }
 
@@ -1739,7 +1743,8 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
        return true;
 
 reject:
-       tipc_sk_respond(sk, skb, err);
+       if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
+               __skb_queue_tail(xmitq, skb);
        return false;
 }
 
@@ -1755,9 +1760,24 @@ reject:
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
        unsigned int truesize = skb->truesize;
+       struct sk_buff_head xmitq;
+       u32 dnode, selector;
 
-       if (likely(filter_rcv(sk, skb)))
+       __skb_queue_head_init(&xmitq);
+
+       if (likely(filter_rcv(sk, skb, &xmitq))) {
                atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
+               return 0;
+       }
+
+       if (skb_queue_empty(&xmitq))
+               return 0;
+
+       /* Send response/rejected message */
+       skb = __skb_dequeue(&xmitq);
+       dnode = msg_destnode(buf_msg(skb));
+       selector = msg_origport(buf_msg(skb));
+       tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
        return 0;
 }
 
@@ -1771,12 +1791,13 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
  * Caller must hold socket lock
  */
 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
-                           u32 dport)
+                           u32 dport, struct sk_buff_head *xmitq)
 {
+       unsigned long time_limit = jiffies + 2;
+       struct sk_buff *skb;
        unsigned int lim;
        atomic_t *dcnt;
-       struct sk_buff *skb;
-       unsigned long time_limit = jiffies + 2;
+       u32 onode;
 
        while (skb_queue_len(inputq)) {
                if (unlikely(time_after_eq(jiffies, time_limit)))
@@ -1788,7 +1809,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 
                /* Add message directly to receive queue if possible */
                if (!sock_owned_by_user(sk)) {
-                       filter_rcv(sk, skb);
+                       filter_rcv(sk, skb, xmitq);
                        continue;
                }
 
@@ -1801,7 +1822,9 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
                        continue;
 
                /* Overload => reject message back to sender */
-               tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+               onode = tipc_own_addr(sock_net(sk));
+               if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
+                       __skb_queue_tail(xmitq, skb);
                break;
        }
 }
@@ -1814,12 +1837,14 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
  */
 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 {
+       struct sk_buff_head xmitq;
        u32 dnode, dport = 0;
        int err;
        struct tipc_sock *tsk;
        struct sock *sk;
        struct sk_buff *skb;
 
+       __skb_queue_head_init(&xmitq);
        while (skb_queue_len(inputq)) {
                dport = tipc_skb_peek_port(inputq, dport);
                tsk = tipc_sk_lookup(net, dport);
@@ -1827,9 +1852,14 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
                if (likely(tsk)) {
                        sk = &tsk->sk;
                        if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
-                               tipc_sk_enqueue(inputq, sk, dport);
+                               tipc_sk_enqueue(inputq, sk, dport, &xmitq);
                                spin_unlock_bh(&sk->sk_lock.slock);
                        }
+                       /* Send pending response/rejected messages, if any */
+                       while ((skb = __skb_dequeue(&xmitq))) {
+                               dnode = msg_destnode(buf_msg(skb));
+                               tipc_node_xmit_skb(net, skb, dnode, dport);
+                       }
                        sock_put(sk);
                        continue;
                }
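
The socket hunks convert direct replies into a deferred-send pattern: while
the receive path runs under the socket spinlock (or from the backlog), any
response or rejection is parked on a local sk_buff_head, and transmission
happens only after the lock is dropped. Skeleton of the pattern, with a
hypothetical process_input() and net/dport taken from the surrounding
function:

    struct sk_buff_head xmitq;
    struct sk_buff *skb;

    __skb_queue_head_init(&xmitq);

    spin_lock_bh(&sk->sk_lock.slock);
    process_input(sk, &xmitq);      /* queues replies, never transmits */
    spin_unlock_bh(&sk->sk_lock.slock);

    /* safe: no socket lock held while hitting the transmit path */
    while ((skb = __skb_dequeue(&xmitq)))
            tipc_node_xmit_skb(net, skb, msg_destnode(buf_msg(skb)), dport);
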
index b5f1221f48d4859156aa640066e1fd80cf2927dc..b96ac918e0ba026d74e2a02dda99b8eb0f613267 100644 (file)
  * function will also cleanup rejected sockets, those that reach the connected
  * state but leave it before they have been accepted.
  *
+ * - Lock ordering for pending or accept queue sockets is:
+ *
+ *     lock_sock(listener);
+ *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
+ *
+ * Using explicit nested locking keeps lockdep happy since normally only one
+ * lock of a given class may be taken at a time.
+ *
  * - Sockets created by user action will be cleaned up when the user process
  * calls close(2), causing our release implementation to be called. Our release
  * implementation will perform some cleanup then drop the last reference so our
@@ -443,7 +451,7 @@ void vsock_pending_work(struct work_struct *work)
        cleanup = true;
 
        lock_sock(listener);
-       lock_sock(sk);
+       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
        if (vsock_is_pending(sk)) {
                vsock_remove_pending(listener, sk);
@@ -1292,7 +1300,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
        if (connected) {
                listener->sk_ack_backlog--;
 
-               lock_sock(connected);
+               lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
                vconnected = vsock_sk(connected);
 
                /* If the listener socket has received an error, then we should
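
lock_sock_nested() here is a lockdep annotation, not a behavioural change:
all struct sock locks share one lock class, so holding the listener's lock
while taking a child's looks like recursive locking unless the inner
acquisition declares its nesting level. The usage shape:

    lock_sock(listener);
    lock_sock_nested(child, SINGLE_DEPTH_NESTING);  /* fixed order: listener first */

    /* ... move child between the pending and accept queues ... */

    release_sock(child);
    release_sock(listener);
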
index d7599014055dfc1c3d256d917e3a3645f1a27143..7d72283901a3bb8fa7d3294e5dcdfbc6cd61e3a2 100644 (file)
@@ -3487,16 +3487,16 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                params.smps_mode = NL80211_SMPS_OFF;
        }
 
+       params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+       if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
+               return -EOPNOTSUPP;
+
        if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
                params.acl = parse_acl_data(&rdev->wiphy, info);
                if (IS_ERR(params.acl))
                        return PTR_ERR(params.acl);
        }
 
-       params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
-       if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
-               return -EOPNOTSUPP;
-
        wdev_lock(wdev);
        err = rdev_start_ap(rdev, dev, &params);
        if (!err) {
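
Moving the pbss check above the ACL parsing is a resource-leak fix:
parse_acl_data() allocates params.acl, and the old order could return
-EOPNOTSUPP after that allocation without freeing it. The safe ordering puts
non-allocating validation first (invalid_band() is a hypothetical stand-in
for the 60GHz band check):

    if (invalid_band(req))              /* cheap check: nothing to undo yet */
            return -EOPNOTSUPP;

    acl = parse_acl_data(wiphy, info);  /* from here on, errors must free */
    if (IS_ERR(acl))
            return PTR_ERR(acl);
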
index 4e809e978b7d274f3967349c87acda43aac57370..b7d1592bd5b8939f50d01ee2ce11a95c0df0e93b 100644 (file)
@@ -509,7 +509,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
                 * replace EtherType */
                hdrlen += ETH_ALEN + 2;
        else
-               tmp.h_proto = htons(skb->len);
+               tmp.h_proto = htons(skb->len - hdrlen);
 
        pskb_pull(skb, hdrlen);
 
@@ -721,6 +721,8 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
         * alignment since sizeof(struct ethhdr) is 14.
         */
        frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len);
+       if (!frame)
+               return NULL;
 
        skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
        skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);
index 52e4e61140d1226f5ba80676d1973baa298d3b23..2573543842d06947cf081ff3cf752c8d309f094b 100644 (file)
@@ -1,2 +1,3 @@
 *.pyc
 *.pyo
+constants.py
index cd129e65d1ffdbbb06e6f1096340e00f1cff8d9e..8b00031f53497035b68bfe65bd682989430fe016 100644 (file)
@@ -13,9 +13,11 @@ quiet_cmd_gen_constants_py = GEN     $@
        $(CPP) -E -x c -P $(c_flags) $< > $@ ;\
        sed -i '1,/<!-- end-c-headers -->/d;' $@
 
-$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in
-       $(call if_changed,gen_constants_py)
+targets += constants.py
+$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in FORCE
+       $(call if_changed_dep,gen_constants_py)
 
 build_constants_py: $(obj)/constants.py
+       @:
 
 clean-files := *.pyc *.pyo $(if $(KBUILD_SRC),*.py) $(obj)/constants.py
index 07e6c2befe368665ed7b35bd4fa9f08e1600d4dc..7986f4e0da123a240eeca854666dd3cfac86d69b 100644 (file)
@@ -14,7 +14,6 @@
 
 #include <linux/fs.h>
 #include <linux/mount.h>
-#include <linux/radix-tree.h>
 
 /* We need to stringify expanded macros so that they can be parsed */
 
@@ -51,9 +50,3 @@ LX_VALUE(MNT_NOEXEC)
 LX_VALUE(MNT_NOATIME)
 LX_VALUE(MNT_NODIRATIME)
 LX_VALUE(MNT_RELATIME)
-
-/* linux/radix-tree.h */
-LX_VALUE(RADIX_TREE_INDIRECT_PTR)
-LX_GDBPARSED(RADIX_TREE_HEIGHT_MASK)
-LX_GDBPARSED(RADIX_TREE_MAP_SHIFT)
-LX_GDBPARSED(RADIX_TREE_MAP_MASK)
diff --git a/scripts/gdb/linux/radixtree.py b/scripts/gdb/linux/radixtree.py
deleted file mode 100644 (file)
index 0fdef4e..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-#
-# gdb helper commands and functions for Linux kernel debugging
-#
-#  Radix Tree Parser
-#
-# Copyright (c) 2016 Linaro Ltd
-#
-# Authors:
-#  Kieran Bingham <kieran.bingham@linaro.org>
-#
-# This work is licensed under the terms of the GNU GPL version 2.
-#
-
-import gdb
-
-from linux import utils
-from linux import constants
-
-radix_tree_root_type = utils.CachedType("struct radix_tree_root")
-radix_tree_node_type = utils.CachedType("struct radix_tree_node")
-
-
-def is_indirect_ptr(node):
-    long_type = utils.get_long_type()
-    return (node.cast(long_type) & constants.LX_RADIX_TREE_INDIRECT_PTR)
-
-
-def indirect_to_ptr(node):
-    long_type = utils.get_long_type()
-    node_type = node.type
-    indirect_ptr = node.cast(long_type) & ~constants.LX_RADIX_TREE_INDIRECT_PTR
-    return indirect_ptr.cast(node_type)
-
-
-def maxindex(height):
-    height = height & constants.LX_RADIX_TREE_HEIGHT_MASK
-    return gdb.parse_and_eval("height_to_maxindex["+str(height)+"]")
-
-
-def lookup(root, index):
-    if root.type == radix_tree_root_type.get_type().pointer():
-        root = root.dereference()
-    elif root.type != radix_tree_root_type.get_type():
-        raise gdb.GdbError("Must be struct radix_tree_root not {}"
-                           .format(root.type))
-
-    node = root['rnode']
-    if node is 0:
-        return None
-
-    if not (is_indirect_ptr(node)):
-        if (index > 0):
-            return None
-        return node
-
-    node = indirect_to_ptr(node)
-
-    height = node['path'] & constants.LX_RADIX_TREE_HEIGHT_MASK
-    if (index > maxindex(height)):
-        return None
-
-    shift = (height-1) * constants.LX_RADIX_TREE_MAP_SHIFT
-
-    while True:
-        new_index = (index >> shift) & constants.LX_RADIX_TREE_MAP_MASK
-        slot = node['slots'][new_index]
-
-        node = slot.cast(node.type.pointer()).dereference()
-        if node is 0:
-            return None
-
-        shift -= constants.LX_RADIX_TREE_MAP_SHIFT
-        height -= 1
-
-        if (height <= 0):
-            break
-
-    return node
-
-
-class LxRadixTree(gdb.Function):
-    """ Lookup and return a node from a RadixTree.
-
-$lx_radix_tree_lookup(root_node [, index]): Return the node at the given index.
-If index is omitted, the root node is dereferenced and returned."""
-
-    def __init__(self):
-        super(LxRadixTree, self).__init__("lx_radix_tree_lookup")
-
-    def invoke(self, root, index=0):
-        result = lookup(root, index)
-        if result is None:
-            raise gdb.GdbError("No entry in tree at index {}".format(index))
-
-        return result
-
-LxRadixTree()
index 9a0f8923f67ccb870224a93d8884458a1fa11a30..004b0ac7fa72d25598d26fbab4b2035e3e882305 100644 (file)
@@ -153,7 +153,7 @@ lx-symbols command."""
             saved_state['breakpoint'].enabled = saved_state['enabled']
 
     def invoke(self, arg, from_tty):
-        self.module_paths = arg.split()
+        self.module_paths = [os.path.expanduser(p) for p in arg.split()]
         self.module_paths.append(os.getcwd())
 
         # enforce update
index 3a80ad6eecad7167c3bb11fb2314f1d1c43c0a89..6e0b0afd888ade32768edba8e659b61a4f396ac0 100644 (file)
@@ -31,4 +31,3 @@ else:
     import linux.lists
     import linux.proc
     import linux.constants
-    import linux.radixtree
index 2660fbcf94d1e0e8affa72d4656584bc0aa06216..7798e1608f4f4bb429c94017e2c6f73b07a7afc9 100644 (file)
@@ -500,34 +500,34 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
 {
        struct common_audit_data sa;
        struct apparmor_audit_data aad = {0,};
-       char *command, *args = value;
+       char *command, *largs = NULL, *args = value;
        size_t arg_size;
        int error;
 
        if (size == 0)
                return -EINVAL;
-       /* args points to a PAGE_SIZE buffer, AppArmor requires that
-        * the buffer must be null terminated or have size <= PAGE_SIZE -1
-        * so that AppArmor can null terminate them
-        */
-       if (args[size - 1] != '\0') {
-               if (size == PAGE_SIZE)
-                       return -EINVAL;
-               args[size] = '\0';
-       }
-
        /* task can only write its own attributes */
        if (current != task)
                return -EACCES;
 
-       args = value;
+       /* AppArmor requires that the buffer be null terminated atm */
+       if (args[size - 1] != '\0') {
+               /* null terminate */
+               largs = args = kmalloc(size + 1, GFP_KERNEL);
+               if (!args)
+                       return -ENOMEM;
+               memcpy(args, value, size);
+               args[size] = '\0';
+       }
+
+       error = -EINVAL;
        args = strim(args);
        command = strsep(&args, " ");
        if (!args)
-               return -EINVAL;
+               goto out;
        args = skip_spaces(args);
        if (!*args)
-               return -EINVAL;
+               goto out;
 
        arg_size = size - (args - (char *) value);
        if (strcmp(name, "current") == 0) {
@@ -553,10 +553,12 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
                        goto fail;
        } else
                /* only support the "current" and "exec" process attributes */
-               return -EINVAL;
+               goto fail;
 
        if (!error)
                error = size;
+out:
+       kfree(largs);
        return error;
 
 fail:
@@ -565,9 +567,9 @@ fail:
        aad.profile = aa_current_profile();
        aad.op = OP_SETPROCATTR;
        aad.info = name;
-       aad.error = -EINVAL;
+       aad.error = error = -EINVAL;
        aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
-       return -EINVAL;
+       goto out;
 }
 
 static int apparmor_task_setrlimit(struct task_struct *task,
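
The apparmor_setprocattr() rework replaces an in-place write one byte past
the supplied buffer with a bounce copy, and it keeps two pointers on
purpose: strim() and strsep() advance the working cursor, so the allocation
is remembered separately for kfree(). The core shape of the fix:

    char *largs = NULL, *args = value;

    if (args[size - 1] != '\0') {           /* not terminated: copy it */
            largs = args = kmalloc(size + 1, GFP_KERNEL);
            if (!args)
                    return -ENOMEM;
            memcpy(args, value, size);
            args[size] = '\0';
    }

    args = strim(args);     /* args moves; largs still holds the allocation */
    /* ... parse ... */
    kfree(largs);           /* kfree(NULL) is a no-op when no copy was made */
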
index a85d45595d02a265f1f8e1a4cd36c812d8cd08f0..b4fe9b00251251d596fbfbce2dd87eb44841b917 100644 (file)
@@ -160,6 +160,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
        
        if (snd_BUG_ON(!card || !id))
                return;
+       if (card->shutdown)
+               return;
        read_lock(&card->ctl_files_rwlock);
 #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
        card->mixer_oss_change_count++;
index 308c9ecf73db18415aa1157ee4e895731a5abc36..8e980aa678d0d8412e951d78f3026f25e6f6badb 100644 (file)
@@ -849,6 +849,14 @@ int snd_pcm_new_internal(struct snd_card *card, const char *id, int device,
 }
 EXPORT_SYMBOL(snd_pcm_new_internal);
 
+static void free_chmap(struct snd_pcm_str *pstr)
+{
+       if (pstr->chmap_kctl) {
+               snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl);
+               pstr->chmap_kctl = NULL;
+       }
+}
+
 static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
 {
        struct snd_pcm_substream *substream, *substream_next;
@@ -871,6 +879,7 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
                kfree(setup);
        }
 #endif
+       free_chmap(pstr);
        if (pstr->substream_count)
                put_device(&pstr->dev);
 }
@@ -1135,10 +1144,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
        for (cidx = 0; cidx < 2; cidx++) {
                if (!pcm->internal)
                        snd_unregister_device(&pcm->streams[cidx].dev);
-               if (pcm->streams[cidx].chmap_kctl) {
-                       snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl);
-                       pcm->streams[cidx].chmap_kctl = NULL;
-               }
+               free_chmap(&pcm->streams[cidx]);
        }
        mutex_unlock(&pcm->open_mutex);
        mutex_unlock(&register_mutex);
index e722022d325d7771d0d516c08ad830d814eeebdc..9a6157ea6881703310586bea76f5e1c61fcfb2a5 100644 (file)
@@ -1955,6 +1955,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
                qhead = tu->qhead++;
                tu->qhead %= tu->queue_size;
+               tu->qused--;
                spin_unlock_irq(&tu->qlock);
 
                if (tu->tread) {
@@ -1968,7 +1969,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                }
 
                spin_lock_irq(&tu->qlock);
-               tu->qused--;
                if (err < 0)
                        goto _error;
                result += unit;
index 4a054d72011246db38fbdba68c25f3da0c0ce372..d3125c16968457436de5b659da2127157b5fbcba 100644 (file)
@@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
        int page, p, pp, delta, i;
 
        page =
-           (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
-            WT_SUBBUF_MASK)
-           >> WT_SUBBUF_SHIFT;
+           (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
+            >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
        if (dma->nr_periods >= 4)
                delta = (page - dma->period_real) & 3;
        else {
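
The au88x0 change fixes an operator-ordering bug: as the fix shows,
WT_SUBBUF_MASK selects the low bits of the already-shifted value, so masking
first and shifting afterwards always produced zero. A runnable demonstration
with made-up field positions:

    #include <stdio.h>

    #define SUBBUF_SHIFT 12
    #define SUBBUF_MASK  0x3        /* 2-bit field living at bits 13:12 */

    int main(void)
    {
            unsigned int stat = 0x3000;     /* field value is 3 */

            /* wrong: masks bits 1:0 first, the shift then produces 0 */
            printf("%u\n", (stat & SUBBUF_MASK) >> SUBBUF_SHIFT);
            /* right: bring the field down, then mask it off */
            printf("%u\n", (stat >> SUBBUF_SHIFT) & SUBBUF_MASK);
            return 0;
    }
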
index 1cb85aeb0cea058d35e5ed4b48b1ce6f9d166ff2..286f5e3686a3e7aa7a504dbf68cfb2104bdd4bdb 100644 (file)
@@ -2200,11 +2200,11 @@ static int snd_echo_resume(struct device *dev)
        u32 pipe_alloc_mask;
        int err;
 
-       commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
+       commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
        if (commpage_bak == NULL)
                return -ENOMEM;
        commpage = chip->comm_page;
-       memcpy(commpage_bak, commpage, sizeof(struct comm_page));
+       memcpy(commpage_bak, commpage, sizeof(*commpage));
 
        err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
        if (err < 0) {
index 320445f3bf736d51e3dcfc884cdf5f7aef0b3855..79c7b340acc2361518b51504b7ecb032a71991b2 100644 (file)
@@ -3977,6 +3977,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid,
 
        for (n = 0; n < spec->paths.used; n++) {
                path = snd_array_elem(&spec->paths, n);
+               if (!path->depth)
+                       continue;
                if (path->path[0] == nid ||
                    path->path[path->depth - 1] == nid) {
                        bool pin_old = path->pin_enabled;
index 94089fc71884a93e38ddbbefdad9a978d02430c6..6f8ea13323c1819c27dbe10f2af0293415f06135 100644 (file)
@@ -367,9 +367,10 @@ enum {
 #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
 #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
 #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
+#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
 #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
-                       IS_KBL(pci) || IS_KBL_LP(pci)
+                       IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)
 
 static char *driver_short_names[] = {
        [AZX_DRIVER_ICH] = "HDA Intel",
@@ -1217,8 +1218,10 @@ static int azx_free(struct azx *chip)
        if (use_vga_switcheroo(hda)) {
                if (chip->disabled && hda->probe_continued)
                        snd_hda_unlock_devices(&chip->bus);
-               if (hda->vga_switcheroo_registered)
+               if (hda->vga_switcheroo_registered) {
                        vga_switcheroo_unregister_client(chip->pci);
+                       vga_switcheroo_fini_domain_pm_ops(chip->card->dev);
+               }
        }
 
        if (bus->chip_init) {
@@ -2190,6 +2193,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Kabylake-LP */
        { PCI_DEVICE(0x8086, 0x9d71),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+       /* Kabylake-H */
+       { PCI_DEVICE(0x8086, 0xa2f0),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
@@ -2263,6 +2269,8 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x157a),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0x15b3),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x793b),
          .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
        { PCI_DEVICE(0x1002, 0x7919),
index 900bfbc3368c3ffc70f74a4b7b2ed41cbf550290..abcb5a6a1cd9547db9c83e1cada1456615d47db8 100644 (file)
@@ -5651,6 +5651,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
        SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
+       SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -5737,7 +5738,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {}
 };
 #define ALC225_STANDARD_PINS \
-       {0x12, 0xb7a60130}, \
        {0x21, 0x04211020}
 
 #define ALC256_STANDARD_PINS \
@@ -5762,10 +5762,24 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
        SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60130},
                {0x14, 0x901701a0}),
        SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60130},
                {0x14, 0x901701b0}),
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60150},
+               {0x14, 0x901701a0}),
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60150},
+               {0x14, 0x901701b0}),
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60130},
+               {0x1b, 0x90170110}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
                {0x14, 0x90170110},
                {0x21, 0x02211020}),
index 4d82a58ff6b0b9c43c9f6f892ecbd6ab79e44de8..f3fb98f0a995bb41dd473ccc4443164e404c9883 100644 (file)
@@ -483,9 +483,10 @@ config SND_SOC_DMIC
        tristate
 
 config SND_SOC_HDMI_CODEC
-       tristate
-       select SND_PCM_ELD
-       select SND_PCM_IEC958
+       tristate
+       select SND_PCM_ELD
+       select SND_PCM_IEC958
+       select HDMI
 
 config SND_SOC_ES8328
        tristate "Everest Semi ES8328 CODEC"
index 647f69de6baac94c3d6aef8a9ff297894ec0c47b..5013d2ba0c10a968045d83e5e5cef6e223e55436 100644 (file)
@@ -146,6 +146,7 @@ static const struct regmap_config ak4613_regmap_cfg = {
        .max_register           = 0x16,
        .reg_defaults           = ak4613_reg,
        .num_reg_defaults       = ARRAY_SIZE(ak4613_reg),
+       .cache_type             = REGCACHE_RBTREE,
 };
 
 static const struct of_device_id ak4613_of_match[] = {
@@ -530,7 +531,6 @@ static int ak4613_i2c_remove(struct i2c_client *client)
 static struct i2c_driver ak4613_i2c_driver = {
        .driver = {
                .name = "ak4613-codec",
-               .owner = THIS_MODULE,
                .of_match_table = ak4613_of_match,
        },
        .probe          = ak4613_i2c_probe,
index d6f4abbbf8a7fd669237d667a03825c13c3603ed..fb3885fe0afb75a872a8d08f7d6fb931555e2446 100644 (file)
@@ -226,6 +226,7 @@ static int v253_open(struct tty_struct *tty)
        if (!tty->disc_data)
                return -ENODEV;
 
+       tty->receive_room = 16;
        if (tty->ops->write(tty, v253_init, len) != len) {
                ret = -EIO;
                goto err;
index 181cd3bf0b926c750257eee7351558405e2a7e30..2abb742fc47b53f19a3a2024fd6424a86c63c631 100644 (file)
@@ -1474,6 +1474,11 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
         * exit, we call pm_runtime_suspend() so that will do for us
         */
        hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+       if (!hlink) {
+               dev_err(&edev->hdac.dev, "hdac link not found\n");
+               return -EIO;
+       }
+
        snd_hdac_ext_bus_link_get(edev->ebus, hlink);
 
        ret = create_fill_widget_route_map(dapm);
@@ -1634,6 +1639,11 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
 
        /* hold the ref while we probe */
        hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+       if (!hlink) {
+               dev_err(&edev->hdac.dev, "hdac link not found\n");
+               return -EIO;
+       }
+
        snd_hdac_ext_bus_link_get(edev->ebus, hlink);
 
        hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL);
@@ -1744,6 +1754,11 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
        }
 
        hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+       if (!hlink) {
+               dev_err(dev, "hdac link not found\n");
+               return -EIO;
+       }
+
        snd_hdac_ext_bus_link_put(ebus, hlink);
 
        return 0;
@@ -1765,6 +1780,11 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
                return 0;
 
        hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+       if (!hlink) {
+               dev_err(dev, "hdac link not found\n");
+               return -EIO;
+       }
+
        snd_hdac_ext_bus_link_get(ebus, hlink);
 
        err = snd_hdac_display_power(bus, true);
index 3c6594da6c9c95c8abb198706cac920bae3959da..d70847c9eeb03d8aeba2c316eadbaf68aed237bc 100644 (file)
@@ -253,7 +253,7 @@ static const struct reg_default rt5650_reg[] = {
        { 0x2b, 0x5454 },
        { 0x2c, 0xaaa0 },
        { 0x2d, 0x0000 },
-       { 0x2f, 0x1002 },
+       { 0x2f, 0x5002 },
        { 0x31, 0x5000 },
        { 0x32, 0x0000 },
        { 0x33, 0x0000 },
index 49a9e7049e2ba1457621cd549ca3722cce0a0688..0af5ddbef1daaa0b4fa7dbfc89c72c52b6a84bfd 100644 (file)
@@ -619,7 +619,7 @@ static const struct snd_kcontrol_new rt5670_snd_controls[] = {
                RT5670_L_MUTE_SFT, RT5670_R_MUTE_SFT, 1, 1),
        SOC_DOUBLE_TLV("HP Playback Volume", RT5670_HP_VOL,
                RT5670_L_VOL_SFT, RT5670_R_VOL_SFT,
-               39, 0, out_vol_tlv),
+               39, 1, out_vol_tlv),
        /* OUTPUT Control */
        SOC_DOUBLE("OUT Channel Switch", RT5670_LOUT1,
                RT5670_VOL_L_SFT, RT5670_VOL_R_SFT, 1, 1),
index da60e3fe5ee7afb37740b750e0f15f6156d6a4eb..e7fe6b7b95b7fc8e29b3da06f770c9d52f8bd155 100644 (file)
@@ -1872,7 +1872,7 @@ static struct snd_soc_dai_driver wm5102_dai[] = {
                .capture = {
                        .stream_name = "Audio Trace CPU",
                        .channels_min = 1,
-                       .channels_max = 6,
+                       .channels_max = 4,
                        .rates = WM5102_RATES,
                        .formats = WM5102_FORMATS,
                },
index b5820e4d547170a4a02ce0a46b09a3712ea42ce1..d54f1b46c9ec08107427ff0f6bf81bf4670103bb 100644 (file)
@@ -1723,6 +1723,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
        { "OUT2L", NULL, "SYSCLK" },
        { "OUT2R", NULL, "SYSCLK" },
        { "OUT3L", NULL, "SYSCLK" },
+       { "OUT3R", NULL, "SYSCLK" },
        { "OUT4L", NULL, "SYSCLK" },
        { "OUT4R", NULL, "SYSCLK" },
        { "OUT5L", NULL, "SYSCLK" },
index f6f9395ea38ef88b40b31e70dafddf27ea6f4945..1c600819f7689b451ae4da81922649961de47c08 100644 (file)
@@ -743,6 +743,7 @@ static const struct regmap_config wm8940_regmap = {
        .max_register = WM8940_MONOMIX,
        .reg_defaults = wm8940_reg_defaults,
        .num_reg_defaults = ARRAY_SIZE(wm8940_reg_defaults),
+       .cache_type = REGCACHE_RBTREE,
 
        .readable_reg = wm8940_readable_register,
        .volatile_reg = wm8940_volatile_register,
index 0f66fda2c7727c3c0821c97228054234b7fcbdba..237dc67002efbefd600f96bb76997d21890a8b9f 100644 (file)
@@ -1513,8 +1513,9 @@ static struct davinci_mcasp_pdata am33xx_mcasp_pdata = {
 };
 
 static struct davinci_mcasp_pdata dra7_mcasp_pdata = {
-       .tx_dma_offset = 0x200,
-       .rx_dma_offset = 0x284,
+       /* The CFG port offset will be calculated if it is needed */
+       .tx_dma_offset = 0,
+       .rx_dma_offset = 0,
        .version = MCASP_VERSION_4,
 };
 
@@ -1734,6 +1735,52 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
        return PCM_EDMA;
 }
 
+static u32 davinci_mcasp_txdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+       int i;
+       u32 offset = 0;
+
+       if (pdata->version != MCASP_VERSION_4)
+               return pdata->tx_dma_offset;
+
+       for (i = 0; i < pdata->num_serializer; i++) {
+               if (pdata->serial_dir[i] == TX_MODE) {
+                       if (!offset) {
+                               offset = DAVINCI_MCASP_TXBUF_REG(i);
+                       } else {
+                               pr_err("%s: Only one serializer allowed!\n",
+                                      __func__);
+                               break;
+                       }
+               }
+       }
+
+       return offset;
+}
+
+static u32 davinci_mcasp_rxdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+       int i;
+       u32 offset = 0;
+
+       if (pdata->version != MCASP_VERSION_4)
+               return pdata->rx_dma_offset;
+
+       for (i = 0; i < pdata->num_serializer; i++) {
+               if (pdata->serial_dir[i] == RX_MODE) {
+                       if (!offset) {
+                               offset = DAVINCI_MCASP_RXBUF_REG(i);
+                       } else {
+                               pr_err("%s: Only one serializer allowed!\n",
+                                      __func__);
+                               break;
+                       }
+               }
+       }
+
+       return offset;
+}
+
 static int davinci_mcasp_probe(struct platform_device *pdev)
 {
        struct snd_dmaengine_dai_dma_data *dma_data;
@@ -1862,7 +1909,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
        if (dat)
                dma_data->addr = dat->start;
        else
-               dma_data->addr = mem->start + pdata->tx_dma_offset;
+               dma_data->addr = mem->start + davinci_mcasp_txdma_offset(pdata);
 
        dma = &mcasp->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
        res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -1883,7 +1930,8 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
                if (dat)
                        dma_data->addr = dat->start;
                else
-                       dma_data->addr = mem->start + pdata->rx_dma_offset;
+                       dma_data->addr =
+                               mem->start + davinci_mcasp_rxdma_offset(pdata);
 
                dma = &mcasp->dma_request[SNDRV_PCM_STREAM_CAPTURE];
                res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
index 1e8787fb3fb766bf386e51b629e2634294712a85..afddc8010c5415966d52aa549c2b7115b916d1f0 100644 (file)
@@ -85,9 +85,9 @@
                                                (n << 2))
 
 /* Transmit Buffer for Serializer n */
-#define DAVINCI_MCASP_TXBUF_REG                0x200
+#define DAVINCI_MCASP_TXBUF_REG(n)     (0x200 + (n << 2))
 /* Receive Buffer for Serializer n */
-#define DAVINCI_MCASP_RXBUF_REG                0x280
+#define DAVINCI_MCASP_RXBUF_REG(n)     (0x280 + (n << 2))
 
 /* McASP FIFO Registers */
 #define DAVINCI_MCASP_V2_AFIFO_BASE    (0x1010)
index 632ecc0e39562aab90a880e0c8b678a4ccea1078..bedec4a325813f92d752b16833ccfb0ba31b2db7 100644 (file)
@@ -952,16 +952,16 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
        ssi_private->i2s_mode = CCSR_SSI_SCR_NET;
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
+               regmap_update_bits(regs, CCSR_SSI_STCCR,
+                                  CCSR_SSI_SxCCR_DC_MASK,
+                                  CCSR_SSI_SxCCR_DC(2));
+               regmap_update_bits(regs, CCSR_SSI_SRCCR,
+                                  CCSR_SSI_SxCCR_DC_MASK,
+                                  CCSR_SSI_SxCCR_DC(2));
                switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
                case SND_SOC_DAIFMT_CBM_CFS:
                case SND_SOC_DAIFMT_CBS_CFS:
                        ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
-                       regmap_update_bits(regs, CCSR_SSI_STCCR,
-                                       CCSR_SSI_SxCCR_DC_MASK,
-                                       CCSR_SSI_SxCCR_DC(2));
-                       regmap_update_bits(regs, CCSR_SSI_SRCCR,
-                                       CCSR_SSI_SxCCR_DC_MASK,
-                                       CCSR_SSI_SxCCR_DC(2));
                        break;
                case SND_SOC_DAIFMT_CBM_CFM:
                        ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
index 395168986462d2ef97659281ec1bf088c806b5fc..1bead81bb5106b590259780d4bcdef86dba5fc03 100644 (file)
@@ -182,24 +182,29 @@ static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
        case SNDRV_PCM_TRIGGER_START:
                if (stream->compr_ops->stream_start)
                        return stream->compr_ops->stream_start(sst->dev, stream->id);
+               break;
        case SNDRV_PCM_TRIGGER_STOP:
                if (stream->compr_ops->stream_drop)
                        return stream->compr_ops->stream_drop(sst->dev, stream->id);
+               break;
        case SND_COMPR_TRIGGER_DRAIN:
                if (stream->compr_ops->stream_drain)
                        return stream->compr_ops->stream_drain(sst->dev, stream->id);
+               break;
        case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
                if (stream->compr_ops->stream_partial_drain)
                        return stream->compr_ops->stream_partial_drain(sst->dev, stream->id);
+               break;
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                if (stream->compr_ops->stream_pause)
                        return stream->compr_ops->stream_pause(sst->dev, stream->id);
+               break;
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                if (stream->compr_ops->stream_pause_release)
                        return stream->compr_ops->stream_pause_release(sst->dev, stream->id);
-       default:
-               return -EINVAL;
+               break;
        }
+       return -EINVAL;
 }
 
 static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
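
Every case in sst_platform_compr_trigger() used to fall through: a START
with a registered stream_start handler returned early, but a START without
one would run the STOP, DRAIN, and PAUSE branches too. The corrected shape,
condensed, with a break on each arm and a single -EINVAL tail:

    switch (cmd) {
    case SNDRV_PCM_TRIGGER_START:
            if (ops->stream_start)
                    return ops->stream_start(dev, id);
            break;                  /* no handler: fall out, not through */
    case SNDRV_PCM_TRIGGER_STOP:
            if (ops->stream_drop)
                    return ops->stream_drop(dev, id);
            break;
    /* ... remaining triggers follow the same pattern ... */
    }
    return -EINVAL;                 /* unknown command or missing handler */
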
index 965ce40ce7520a37520b116d76ba1885aaaaeb7c..8b95e09e23e8bd3fe9ddd886538b9f07d8a4c07d 100644 (file)
@@ -291,6 +291,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
        sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
                        SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
 
+       INIT_LIST_HEAD(&sst->module_list);
        ret = skl_ipc_init(dev, skl);
        if (ret)
                return ret;
index 49354d17ea553795e45f7f04e4603429dbca70f1..c4c51a4d3c8fd7fdbeec39defae6745500726e1a 100644 (file)
@@ -518,7 +518,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
                }
        }
 
-       rsnd_mod_bset(adg_mod, SSICKR, 0x00FF0000, ckr);
+       rsnd_mod_bset(adg_mod, SSICKR, 0x80FF0000, ckr);
        rsnd_mod_write(adg_mod, BRRA,  rbga);
        rsnd_mod_write(adg_mod, BRRB,  rbgb);
 
index 69860da473ea8b9d7ee853e9dfcdcc804690983f..9e5276d6dda05c999fcba24ff35ab7345513c6da 100644 (file)
@@ -556,7 +556,6 @@ static int usb_audio_probe(struct usb_interface *intf,
                                goto __error;
                        }
                        chip = usb_chip[i];
-                       dev_set_drvdata(&dev->dev, chip);
                        atomic_inc(&chip->active); /* avoid autopm */
                        break;
                }
@@ -582,6 +581,7 @@ static int usb_audio_probe(struct usb_interface *intf,
                        goto __error;
                }
        }
+       dev_set_drvdata(&dev->dev, chip);
 
        /*
         * For devices with more than one control interface, we assume the
index e8a1e69eb92c5235735d42847e4f34b2e737a972..25d803148f5c6e0c9bb3109db1616a2e95fd4359 100644 (file)
@@ -122,10 +122,14 @@ static bool ignore_func(struct objtool_file *file, struct symbol *func)
 
        /* check for STACK_FRAME_NON_STANDARD */
        if (file->whitelist && file->whitelist->rela)
-               list_for_each_entry(rela, &file->whitelist->rela->rela_list, list)
-                       if (rela->sym->sec == func->sec &&
+               list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
+                       if (rela->sym->type == STT_SECTION &&
+                           rela->sym->sec == func->sec &&
                            rela->addend == func->offset)
                                return true;
+                       if (rela->sym->type == STT_FUNC && rela->sym == func)
+                               return true;
+               }
 
        /* check if it has a context switching instruction */
        func_for_each_insn(file, func, insn)
index b7447ceb75e9816509286f711030c0085703fdf2..b0ac057417500d3f7d41b180fe09c459c7cf5de5 100644 (file)
@@ -122,7 +122,7 @@ enum {
        NODE_TAGGED = 2,
 };
 
-#define THRASH_SIZE            1000 * 1000
+#define THRASH_SIZE            (1000 * 1000)
 #define N 127
 #define BATCH  33
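
The THRASH_SIZE change applies the classic parenthesize-your-macros rule: an
expression-bodied macro without outer parentheses binds unexpectedly to
neighbouring operators. A runnable demonstration:

    #include <stdio.h>

    #define BAD_SIZE  1000 * 1000
    #define GOOD_SIZE (1000 * 1000)

    int main(void)
    {
            /* 2000000 / BAD_SIZE expands to 2000000 / 1000 * 1000 */
            printf("%d\n", 2000000 / BAD_SIZE);   /* 2000000 */
            printf("%d\n", 2000000 / GOOD_SIZE);  /* 2 */
            return 0;
    }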
 
index 7cf6e1769903976f400b94d3c0c3a8734002b51a..b9d34b37c017be2ea35b9083877965f8aa224060 100644 (file)
@@ -510,10 +510,11 @@ static void slab_stats(struct slabinfo *s)
                        s->alloc_node_mismatch, (s->alloc_node_mismatch * 100) / total);
        }
 
-       if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail)
+       if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) {
                printf("\nCmpxchg_double Looping\n------------------------\n");
                printf("Locked Cmpxchg Double redos   %lu\nUnlocked Cmpxchg Double redos %lu\n",
                        s->cmpxchg_double_fail, s->cmpxchg_double_cpu_fail);
+       }
 }
 
 static void report(struct slabinfo *s)
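
The slabinfo fix is a missing-braces bug: the indentation suggested both
printf calls were guarded, but only the first was, so the statistics line
printed even when there were no cmpxchg_double failures. A minimal
reproduction:

    #include <stdio.h>

    int main(void)
    {
            int fail = 0;

            if (fail)
                    printf("header\n");
            printf("stats\n");          /* runs regardless of the condition */

            if (fail) {
                    printf("header\n");
                    printf("stats\n");  /* now properly guarded */
            }
            return 0;
    }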